Reformat only

Note: the #include reordering can break the compile by exposing implicit header dependencies.
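
A minimal sketch of that failure mode (hypothetical file names, not the
actual headers involved):

    // widget.h: uses uint32_t but does not include <stdint.h> itself.
    struct Widget { uint32_t id; };

    // widget.cpp, old include order: compiles only because <stdint.h>
    // happens to precede "widget.h".
    #include <stdint.h>
    #include "widget.h"

    // widget.cpp, new include order: the associated header now comes
    // first, uint32_t is not yet declared, and the compile breaks until
    // widget.h gains its own #include <stdint.h>.
    #include "widget.h"
    #include <stdint.h>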

Test: Build 32+64, boot 32 and use camera
Change-Id: Ib13924ca8951cae9a667d19146a3fcee04c435dd
diff --git a/guest/hals/camera/CallbackNotifier.cpp b/guest/hals/camera/CallbackNotifier.cpp
index 5ac5a6b..0c98936 100644
--- a/guest/hals/camera/CallbackNotifier.cpp
+++ b/guest/hals/camera/CallbackNotifier.cpp
@@ -15,36 +15,33 @@
  */
 
 /*
- * Contains implementation of a class CallbackNotifier that manages callbacks set
- * via set_callbacks, enable_msg_type, and disable_msg_type camera HAL API.
+ * Contains implementation of a class CallbackNotifier that manages callbacks
+ * set via set_callbacks, enable_msg_type, and disable_msg_type camera HAL API.
  */
 
 #define LOG_NDEBUG 0
 #define LOG_TAG "EmulatedCamera_CallbackNotifier"
-#include <cutils/log.h>
-#include <MetadataBufferType.h>
-#include "EmulatedCameraDevice.h"
 #include "CallbackNotifier.h"
+#include <MetadataBufferType.h>
+#include <cutils/log.h>
+#include "EmulatedCameraDevice.h"
 #include "ImageMetadata.h"
 #include "JpegCompressor.h"
 
 namespace android {
 
 /* String representation of camera messages. */
-static const char* lCameraMessages[] =
-{
-    "CAMERA_MSG_ERROR",
-    "CAMERA_MSG_SHUTTER",
-    "CAMERA_MSG_FOCUS",
-    "CAMERA_MSG_ZOOM",
-    "CAMERA_MSG_PREVIEW_FRAME",
-    "CAMERA_MSG_VIDEO_FRAME",
-    "CAMERA_MSG_POSTVIEW_FRAME",
-    "CAMERA_MSG_RAW_IMAGE",
-    "CAMERA_MSG_COMPRESSED_IMAGE",
-    "CAMERA_MSG_RAW_IMAGE_NOTIFY",
-    "CAMERA_MSG_PREVIEW_METADATA"
-};
+static const char* lCameraMessages[] = {"CAMERA_MSG_ERROR",
+                                        "CAMERA_MSG_SHUTTER",
+                                        "CAMERA_MSG_FOCUS",
+                                        "CAMERA_MSG_ZOOM",
+                                        "CAMERA_MSG_PREVIEW_FRAME",
+                                        "CAMERA_MSG_VIDEO_FRAME",
+                                        "CAMERA_MSG_POSTVIEW_FRAME",
+                                        "CAMERA_MSG_RAW_IMAGE",
+                                        "CAMERA_MSG_COMPRESSED_IMAGE",
+                                        "CAMERA_MSG_RAW_IMAGE_NOTIFY",
+                                        "CAMERA_MSG_PREVIEW_METADATA"};
 static const int lCameraMessagesNum = sizeof(lCameraMessages) / sizeof(char*);
 
 /* Builds an array of strings for the given set of messages.
@@ -55,34 +52,32 @@
  * Return:
  *  Number of strings saved into the 'strings' array.
  */
-static int GetMessageStrings(uint32_t msg, const char** strings, int max)
-{
-    int index = 0;
-    int out = 0;
-    while (msg != 0 && out < max && index < lCameraMessagesNum) {
-        while ((msg & 0x1) == 0 && index < lCameraMessagesNum) {
-            msg >>= 1;
-            index++;
-        }
-        if ((msg & 0x1) != 0 && index < lCameraMessagesNum) {
-            strings[out] = lCameraMessages[index];
-            out++;
-            msg >>= 1;
-            index++;
-        }
+static int GetMessageStrings(uint32_t msg, const char** strings, int max) {
+  int index = 0;
+  int out = 0;
+  while (msg != 0 && out < max && index < lCameraMessagesNum) {
+    while ((msg & 0x1) == 0 && index < lCameraMessagesNum) {
+      msg >>= 1;
+      index++;
     }
+    if ((msg & 0x1) != 0 && index < lCameraMessagesNum) {
+      strings[out] = lCameraMessages[index];
+      out++;
+      msg >>= 1;
+      index++;
+    }
+  }
 
-    return out;
+  return out;
 }
 
 /* Logs messages, enabled by the mask. */
-static void PrintMessages(uint32_t msg)
-{
-    const char* strs[lCameraMessagesNum];
-    const int translated = GetMessageStrings(msg, strs, lCameraMessagesNum);
-    for (int n = 0; n < translated; n++) {
-        ALOGV("    %s", strs[n]);
-    }
+static void PrintMessages(uint32_t msg) {
+  const char* strs[lCameraMessagesNum];
+  const int translated = GetMessageStrings(msg, strs, lCameraMessagesNum);
+  for (int n = 0; n < translated; n++) {
+    ALOGV("    %s", strs[n]);
+  }
 }
 
 CallbackNotifier::CallbackNotifier()
@@ -96,216 +91,200 @@
       mMessageEnabler(0),
       mJpegQuality(90),
       mVideoRecEnabled(false),
-      mTakingPicture(false)
-{
-}
+      mTakingPicture(false) {}
 
-CallbackNotifier::~CallbackNotifier()
-{
-}
+CallbackNotifier::~CallbackNotifier() {}
 
 /****************************************************************************
  * Camera API
  ***************************************************************************/
 
-void CallbackNotifier::setCallbacks(camera_notify_callback notify_cb,
-                                    camera_data_callback data_cb,
-                                    camera_data_timestamp_callback data_cb_timestamp,
-                                    camera_request_memory get_memory,
-                                    void* user)
-{
-    ALOGV("%s: %p, %p, %p, %p (%p)",
-         __FUNCTION__, notify_cb, data_cb, data_cb_timestamp, get_memory, user);
+void CallbackNotifier::setCallbacks(
+    camera_notify_callback notify_cb, camera_data_callback data_cb,
+    camera_data_timestamp_callback data_cb_timestamp,
+    camera_request_memory get_memory, void* user) {
+  ALOGV("%s: %p, %p, %p, %p (%p)", __FUNCTION__, notify_cb, data_cb,
+        data_cb_timestamp, get_memory, user);
 
-    Mutex::Autolock locker(&mObjectLock);
-    mNotifyCB = notify_cb;
-    mDataCB = data_cb;
-    mDataCBTimestamp = data_cb_timestamp;
-    mGetMemoryCB = get_memory;
-    mCBOpaque = user;
+  Mutex::Autolock locker(&mObjectLock);
+  mNotifyCB = notify_cb;
+  mDataCB = data_cb;
+  mDataCBTimestamp = data_cb_timestamp;
+  mGetMemoryCB = get_memory;
+  mCBOpaque = user;
 }
 
-void CallbackNotifier::enableMessage(uint msg_type)
-{
-    ALOGV("%s: msg_type = 0x%x", __FUNCTION__, msg_type);
-    PrintMessages(msg_type);
+void CallbackNotifier::enableMessage(uint msg_type) {
+  ALOGV("%s: msg_type = 0x%x", __FUNCTION__, msg_type);
+  PrintMessages(msg_type);
 
-    Mutex::Autolock locker(&mObjectLock);
-    mMessageEnabler |= msg_type;
-    ALOGV("**** Currently enabled messages:");
-    PrintMessages(mMessageEnabler);
+  Mutex::Autolock locker(&mObjectLock);
+  mMessageEnabler |= msg_type;
+  ALOGV("**** Currently enabled messages:");
+  PrintMessages(mMessageEnabler);
 }
 
-void CallbackNotifier::disableMessage(uint msg_type)
-{
-    ALOGV("%s: msg_type = 0x%x", __FUNCTION__, msg_type);
-    PrintMessages(msg_type);
+void CallbackNotifier::disableMessage(uint msg_type) {
+  ALOGV("%s: msg_type = 0x%x", __FUNCTION__, msg_type);
+  PrintMessages(msg_type);
 
-    Mutex::Autolock locker(&mObjectLock);
-    mMessageEnabler &= ~msg_type;
-    ALOGV("**** Currently enabled messages:");
-    PrintMessages(mMessageEnabler);
+  Mutex::Autolock locker(&mObjectLock);
+  mMessageEnabler &= ~msg_type;
+  ALOGV("**** Currently enabled messages:");
+  PrintMessages(mMessageEnabler);
 }
 
-status_t CallbackNotifier::enableVideoRecording(int fps)
-{
-    ALOGV("%s: FPS = %d", __FUNCTION__, fps);
+status_t CallbackNotifier::enableVideoRecording(int fps) {
+  ALOGV("%s: FPS = %d", __FUNCTION__, fps);
 
-    Mutex::Autolock locker(&mObjectLock);
-    mVideoRecEnabled = true;
-    mLastFrameTimestamp = 0;
-    mFrameRefreshFreq = 1000000000LL / fps;
+  Mutex::Autolock locker(&mObjectLock);
+  mVideoRecEnabled = true;
+  mLastFrameTimestamp = 0;
+  mFrameRefreshFreq = 1000000000LL / fps;
 
-    return NO_ERROR;
+  return NO_ERROR;
 }
 
-void CallbackNotifier::disableVideoRecording()
-{
-    ALOGV("%s:", __FUNCTION__);
+void CallbackNotifier::disableVideoRecording() {
+  ALOGV("%s:", __FUNCTION__);
 
-    Mutex::Autolock locker(&mObjectLock);
-    mVideoRecEnabled = false;
-    mLastFrameTimestamp = 0;
-    mFrameRefreshFreq = 0;
+  Mutex::Autolock locker(&mObjectLock);
+  mVideoRecEnabled = false;
+  mLastFrameTimestamp = 0;
+  mFrameRefreshFreq = 0;
 }
 
-void CallbackNotifier::releaseRecordingFrame(const void* opaque)
-{
-    List<camera_memory_t*>::iterator it = mCameraMemoryTs.begin();
-    for( ; it != mCameraMemoryTs.end(); ++it ) {
-        if ( (*it)->data == opaque ) {
-            (*it)->release( *it );
-            mCameraMemoryTs.erase(it);
-            break;
-        }
+void CallbackNotifier::releaseRecordingFrame(const void* opaque) {
+  List<camera_memory_t*>::iterator it = mCameraMemoryTs.begin();
+  for (; it != mCameraMemoryTs.end(); ++it) {
+    if ((*it)->data == opaque) {
+      (*it)->release(*it);
+      mCameraMemoryTs.erase(it);
+      break;
     }
+  }
 }
 
-status_t CallbackNotifier::storeMetaDataInBuffers(bool enable)
-{
-    // Return error if metadata is request, otherwise silently agree.
-    return enable ? INVALID_OPERATION : NO_ERROR;
+status_t CallbackNotifier::storeMetaDataInBuffers(bool enable) {
+  // Return an error if metadata is requested, otherwise silently agree.
+  return enable ? INVALID_OPERATION : NO_ERROR;
 }
 
 /****************************************************************************
  * Public API
  ***************************************************************************/
 
-void CallbackNotifier::cleanupCBNotifier()
-{
-    Mutex::Autolock locker(&mObjectLock);
-    mMessageEnabler = 0;
-    mNotifyCB = NULL;
-    mDataCB = NULL;
-    mDataCBTimestamp = NULL;
-    mGetMemoryCB = NULL;
-    mCBOpaque = NULL;
-    mLastFrameTimestamp = 0;
-    mFrameRefreshFreq = 0;
-    mJpegQuality = 90;
-    mVideoRecEnabled = false;
-    mTakingPicture = false;
+void CallbackNotifier::cleanupCBNotifier() {
+  Mutex::Autolock locker(&mObjectLock);
+  mMessageEnabler = 0;
+  mNotifyCB = NULL;
+  mDataCB = NULL;
+  mDataCBTimestamp = NULL;
+  mGetMemoryCB = NULL;
+  mCBOpaque = NULL;
+  mLastFrameTimestamp = 0;
+  mFrameRefreshFreq = 0;
+  mJpegQuality = 90;
+  mVideoRecEnabled = false;
+  mTakingPicture = false;
 }
 
 void CallbackNotifier::onNextFrameAvailable(const void* frame,
                                             nsecs_t timestamp,
-                                            EmulatedCameraDevice* camera_dev)
-{
-    if (isMessageEnabled(CAMERA_MSG_VIDEO_FRAME) && isVideoRecordingEnabled() &&
-            isNewVideoFrameTime(timestamp)) {
-        camera_memory_t* cam_buff =
-            mGetMemoryCB(-1, camera_dev->getFrameBufferSize(), 1, mCBOpaque);
-        if (NULL != cam_buff && NULL != cam_buff->data) {
-            memcpy(cam_buff->data, frame, camera_dev->getFrameBufferSize());
-            mDataCBTimestamp(timestamp, CAMERA_MSG_VIDEO_FRAME,
-                               cam_buff, 0, mCBOpaque);
+                                            EmulatedCameraDevice* camera_dev) {
+  if (isMessageEnabled(CAMERA_MSG_VIDEO_FRAME) && isVideoRecordingEnabled() &&
+      isNewVideoFrameTime(timestamp)) {
+    camera_memory_t* cam_buff =
+        mGetMemoryCB(-1, camera_dev->getFrameBufferSize(), 1, mCBOpaque);
+    if (NULL != cam_buff && NULL != cam_buff->data) {
+      memcpy(cam_buff->data, frame, camera_dev->getFrameBufferSize());
+      mDataCBTimestamp(timestamp, CAMERA_MSG_VIDEO_FRAME, cam_buff, 0,
+                       mCBOpaque);
 
-            mCameraMemoryTs.push_back( cam_buff );
-        } else {
+      mCameraMemoryTs.push_back(cam_buff);
+    } else {
+      ALOGE("%s: Memory failure in CAMERA_MSG_VIDEO_FRAME", __FUNCTION__);
+    }
+  }
+
+  if (isMessageEnabled(CAMERA_MSG_PREVIEW_FRAME)) {
+    camera_memory_t* cam_buff =
+        mGetMemoryCB(-1, camera_dev->getFrameBufferSize(), 1, mCBOpaque);
+    if (NULL != cam_buff && NULL != cam_buff->data) {
+      memcpy(cam_buff->data, frame, camera_dev->getFrameBufferSize());
+      mDataCB(CAMERA_MSG_PREVIEW_FRAME, cam_buff, 0, NULL, mCBOpaque);
+      cam_buff->release(cam_buff);
+    } else {
+      ALOGE("%s: Memory failure in CAMERA_MSG_PREVIEW_FRAME", __FUNCTION__);
+    }
+  }
+
+  if (mTakingPicture) {
+    /* This happens just once. */
+    mTakingPicture = false;
+    /* The sequence of callbacks during picture taking is:
+     *  - CAMERA_MSG_SHUTTER
+     *  - CAMERA_MSG_RAW_IMAGE_NOTIFY
+     *  - CAMERA_MSG_COMPRESSED_IMAGE
+     */
+    if (isMessageEnabled(CAMERA_MSG_SHUTTER)) {
+      mNotifyCB(CAMERA_MSG_SHUTTER, 0, 0, mCBOpaque);
+    }
+    if (isMessageEnabled(CAMERA_MSG_RAW_IMAGE_NOTIFY)) {
+      mNotifyCB(CAMERA_MSG_RAW_IMAGE_NOTIFY, 0, 0, mCBOpaque);
+    }
+    if (isMessageEnabled(CAMERA_MSG_COMPRESSED_IMAGE)) {
+      /* Compress the frame to JPEG. Note that when taking pictures, we
+       * have requested the camera device to provide us with NV21 frames. */
+      NV21JpegCompressor compressor;
+      struct ::ImageMetadata meta;
+      status_t res = camera_dev->getImageMetadata(&meta);
+      if (res == NO_ERROR) {
+        res = compressor.compressRawImage(frame, &meta, mJpegQuality);
+        if (res == NO_ERROR) {
+          camera_memory_t* jpeg_buff =
+              mGetMemoryCB(-1, compressor.getCompressedSize(), 1, mCBOpaque);
+          if (NULL != jpeg_buff && NULL != jpeg_buff->data) {
+            compressor.getCompressedImage(jpeg_buff->data);
+            mDataCB(CAMERA_MSG_COMPRESSED_IMAGE, jpeg_buff, 0, NULL, mCBOpaque);
+            jpeg_buff->release(jpeg_buff);
+          } else {
             ALOGE("%s: Memory failure in CAMERA_MSG_VIDEO_FRAME", __FUNCTION__);
-        }
-    }
-
-    if (isMessageEnabled(CAMERA_MSG_PREVIEW_FRAME)) {
-        camera_memory_t* cam_buff =
-            mGetMemoryCB(-1, camera_dev->getFrameBufferSize(), 1, mCBOpaque);
-        if (NULL != cam_buff && NULL != cam_buff->data) {
-            memcpy(cam_buff->data, frame, camera_dev->getFrameBufferSize());
-            mDataCB(CAMERA_MSG_PREVIEW_FRAME, cam_buff, 0, NULL, mCBOpaque);
-            cam_buff->release(cam_buff);
+          }
         } else {
-            ALOGE("%s: Memory failure in CAMERA_MSG_PREVIEW_FRAME", __FUNCTION__);
+          ALOGE("%s: Compression failure in CAMERA_MSG_VIDEO_FRAME",
+                __FUNCTION__);
         }
+      } else {
+        ALOGE("%s: Image Metadata acquisition failure.", __FUNCTION__);
+      }
     }
-
-    if (mTakingPicture) {
-        /* This happens just once. */
-        mTakingPicture = false;
-        /* The sequence of callbacks during picture taking is:
-         *  - CAMERA_MSG_SHUTTER
-         *  - CAMERA_MSG_RAW_IMAGE_NOTIFY
-         *  - CAMERA_MSG_COMPRESSED_IMAGE
-         */
-        if (isMessageEnabled(CAMERA_MSG_SHUTTER)) {
-            mNotifyCB(CAMERA_MSG_SHUTTER, 0, 0, mCBOpaque);
-        }
-        if (isMessageEnabled(CAMERA_MSG_RAW_IMAGE_NOTIFY)) {
-            mNotifyCB(CAMERA_MSG_RAW_IMAGE_NOTIFY, 0, 0, mCBOpaque);
-        }
-        if (isMessageEnabled(CAMERA_MSG_COMPRESSED_IMAGE)) {
-            /* Compress the frame to JPEG. Note that when taking pictures, we
-             * have requested camera device to provide us with NV21 frames. */
-            NV21JpegCompressor compressor;
-            struct ::ImageMetadata meta;
-            status_t res =
-                camera_dev->getImageMetadata(&meta);
-            if (res == NO_ERROR) {
-                res = compressor.compressRawImage(frame, &meta, mJpegQuality);
-                if (res == NO_ERROR) {
-                    camera_memory_t* jpeg_buff =
-                        mGetMemoryCB(-1, compressor.getCompressedSize(), 1, mCBOpaque);
-                    if (NULL != jpeg_buff && NULL != jpeg_buff->data) {
-                        compressor.getCompressedImage(jpeg_buff->data);
-                        mDataCB(CAMERA_MSG_COMPRESSED_IMAGE, jpeg_buff, 0, NULL, mCBOpaque);
-                        jpeg_buff->release(jpeg_buff);
-                    } else {
-                        ALOGE("%s: Memory failure in CAMERA_MSG_VIDEO_FRAME", __FUNCTION__);
-                    }
-                } else {
-                    ALOGE("%s: Compression failure in CAMERA_MSG_VIDEO_FRAME", __FUNCTION__);
-                }
-            } else {
-                ALOGE("%s: Image Metadata acquisition failure.", __FUNCTION__);
-            }
-        }
-    }
+  }
 }
 
-void CallbackNotifier::onCameraDeviceError(int err)
-{
-    if (isMessageEnabled(CAMERA_MSG_ERROR) && mNotifyCB != NULL) {
-        mNotifyCB(CAMERA_MSG_ERROR, err, 0, mCBOpaque);
-    }
+void CallbackNotifier::onCameraDeviceError(int err) {
+  if (isMessageEnabled(CAMERA_MSG_ERROR) && mNotifyCB != NULL) {
+    mNotifyCB(CAMERA_MSG_ERROR, err, 0, mCBOpaque);
+  }
 }
 
 void CallbackNotifier::onCameraFocusAcquired() {
-    if (isMessageEnabled(CAMERA_MSG_FOCUS) && mNotifyCB != NULL) {
-        mNotifyCB(CAMERA_MSG_FOCUS, 1, 0, mCBOpaque);
-    }
+  if (isMessageEnabled(CAMERA_MSG_FOCUS) && mNotifyCB != NULL) {
+    mNotifyCB(CAMERA_MSG_FOCUS, 1, 0, mCBOpaque);
+  }
 }
 
 /****************************************************************************
  * Private API
  ***************************************************************************/
 
-bool CallbackNotifier::isNewVideoFrameTime(nsecs_t timestamp)
-{
-    Mutex::Autolock locker(&mObjectLock);
-    if ((timestamp - mLastFrameTimestamp) >= mFrameRefreshFreq) {
-        mLastFrameTimestamp = timestamp;
-        return true;
-    }
-    return false;
+bool CallbackNotifier::isNewVideoFrameTime(nsecs_t timestamp) {
+  Mutex::Autolock locker(&mObjectLock);
+  if ((timestamp - mLastFrameTimestamp) >= mFrameRefreshFreq) {
+    mLastFrameTimestamp = timestamp;
+    return true;
+  }
+  return false;
 }
 
 }; /* namespace android */
diff --git a/guest/hals/camera/CallbackNotifier.h b/guest/hals/camera/CallbackNotifier.h
index 569ca57..47b101c 100644
--- a/guest/hals/camera/CallbackNotifier.h
+++ b/guest/hals/camera/CallbackNotifier.h
@@ -28,8 +28,8 @@
 
 class EmulatedCameraDevice;
 
-/* Manages callbacks set via set_callbacks, enable_msg_type, and disable_msg_type
- * camera HAL API.
+/* Manages callbacks set via set_callbacks, enable_msg_type, and
+ * disable_msg_type camera HAL API.
  *
  * Objects of this class are contained in EmulatedCamera objects, and handle
  * relevant camera API callbacks.
@@ -38,205 +38,193 @@
  * which will cause a deadlock.
  */
 class CallbackNotifier {
-public:
-    /* Constructs CallbackNotifier instance. */
-    CallbackNotifier();
+ public:
+  /* Constructs CallbackNotifier instance. */
+  CallbackNotifier();
 
-    /* Destructs CallbackNotifier instance. */
-    ~CallbackNotifier();
+  /* Destructs CallbackNotifier instance. */
+  ~CallbackNotifier();
 
-    /****************************************************************************
-     * Camera API
-     ***************************************************************************/
+  /****************************************************************************
+   * Camera API
+   ***************************************************************************/
 
-public:
-    /* Actual handler for camera_device_ops_t::set_callbacks callback.
-     * This method is called by the containing emulated camera object when it is
-     * handing the camera_device_ops_t::set_callbacks callback.
-     */
-    void setCallbacks(camera_notify_callback notify_cb,
-                      camera_data_callback data_cb,
-                      camera_data_timestamp_callback data_cb_timestamp,
-                      camera_request_memory get_memory,
-                      void* user);
+ public:
+  /* Actual handler for camera_device_ops_t::set_callbacks callback.
+   * This method is called by the containing emulated camera object when it is
+   * handling the camera_device_ops_t::set_callbacks callback.
+   */
+  void setCallbacks(camera_notify_callback notify_cb,
+                    camera_data_callback data_cb,
+                    camera_data_timestamp_callback data_cb_timestamp,
+                    camera_request_memory get_memory, void* user);
 
-    /* Actual handler for camera_device_ops_t::enable_msg_type callback.
-     * This method is called by the containing emulated camera object when it is
-     * handing the camera_device_ops_t::enable_msg_type callback.
-     */
-    void enableMessage(uint msg_type);
+  /* Actual handler for camera_device_ops_t::enable_msg_type callback.
+   * This method is called by the containing emulated camera object when it is
+   * handling the camera_device_ops_t::enable_msg_type callback.
+   */
+  void enableMessage(uint msg_type);
 
-    /* Actual handler for camera_device_ops_t::disable_msg_type callback.
-     * This method is called by the containing emulated camera object when it is
-     * handing the camera_device_ops_t::disable_msg_type callback.
-     */
-    void disableMessage(uint msg_type);
+  /* Actual handler for camera_device_ops_t::disable_msg_type callback.
+   * This method is called by the containing emulated camera object when it is
+   * handling the camera_device_ops_t::disable_msg_type callback.
+   */
+  void disableMessage(uint msg_type);
 
-    /* Actual handler for camera_device_ops_t::store_meta_data_in_buffers
-     * callback. This method is called by the containing emulated camera object
-     * when it is handing the camera_device_ops_t::store_meta_data_in_buffers
-     * callback.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status.
-     */
-    status_t storeMetaDataInBuffers(bool enable);
+  /* Actual handler for camera_device_ops_t::store_meta_data_in_buffers
+   * callback. This method is called by the containing emulated camera object
+   * when it is handling the camera_device_ops_t::store_meta_data_in_buffers
+   * callback.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status.
+   */
+  status_t storeMetaDataInBuffers(bool enable);
 
-    /* Enables video recording.
-     * This method is called by the containing emulated camera object when it is
-     * handing the camera_device_ops_t::start_recording callback.
-     * Param:
-     *  fps - Video frame frequency. This parameter determins when a frame
-     *      received via onNextFrameAvailable call will be pushed through the
-     *      callback.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status.
-     */
-    status_t enableVideoRecording(int fps);
+  /* Enables video recording.
+   * This method is called by the containing emulated camera object when it is
+   * handling the camera_device_ops_t::start_recording callback.
+   * Param:
+   *  fps - Video frame frequency. This parameter determines when a frame
+   *      received via onNextFrameAvailable call will be pushed through the
+   *      callback.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status.
+   */
+  status_t enableVideoRecording(int fps);
 
-    /* Disables video recording.
-     * This method is called by the containing emulated camera object when it is
-     * handing the camera_device_ops_t::stop_recording callback.
-     */
-    void disableVideoRecording();
+  /* Disables video recording.
+   * This method is called by the containing emulated camera object when it is
+   * handling the camera_device_ops_t::stop_recording callback.
+   */
+  void disableVideoRecording();
 
-    /* Releases video frame, sent to the framework.
-     * This method is called by the containing emulated camera object when it is
-     * handing the camera_device_ops_t::release_recording_frame callback.
-     */
-    void releaseRecordingFrame(const void* opaque);
+  /* Releases a video frame that was sent to the framework.
+   * This method is called by the containing emulated camera object when it is
+   * handling the camera_device_ops_t::release_recording_frame callback.
+   */
+  void releaseRecordingFrame(const void* opaque);
 
-    /* Actual handler for camera_device_ops_t::msg_type_enabled callback.
-     * This method is called by the containing emulated camera object when it is
-     * handing the camera_device_ops_t::msg_type_enabled callback.
-     * Note: this method doesn't grab a lock while checking message status, since
-     * upon exit the status would be undefined anyway. So, grab a lock before
-     * calling this method if you care about persisting a defined message status.
-     * Return:
-     *  0 if message is disabled, or non-zero value, if message is enabled.
-     */
-    inline int isMessageEnabled(uint msg_type)
-    {
-        return mMessageEnabler & msg_type;
-    }
+  /* Actual handler for camera_device_ops_t::msg_type_enabled callback.
+   * This method is called by the containing emulated camera object when it is
+   * handling the camera_device_ops_t::msg_type_enabled callback.
+   * Note: this method doesn't grab a lock while checking message status, since
+   * upon exit the status would be undefined anyway. So, grab a lock before
+   * calling this method if you care about persisting a defined message status.
+   * Return:
+   *  0 if message is disabled, or non-zero value, if message is enabled.
+   */
+  inline int isMessageEnabled(uint msg_type) {
+    return mMessageEnabler & msg_type;
+  }
 
-    /* Checks id video recording is enabled.
-     * This method is called by the containing emulated camera object when it is
-     * handing the camera_device_ops_t::recording_enabled callback.
-     * Note: this method doesn't grab a lock while checking video recordin status,
-     * since upon exit the status would be undefined anyway. So, grab a lock
-     * before calling this method if you care about persisting of a defined video
-     * recording status.
-     * Return:
-     *  true if video recording is enabled, or false if it is disabled.
-     */
-    inline bool isVideoRecordingEnabled()
-    {
-        return mVideoRecEnabled;
-    }
+  /* Checks if video recording is enabled.
+   * This method is called by the containing emulated camera object when it is
+   * handling the camera_device_ops_t::recording_enabled callback.
+   * Note: this method doesn't grab a lock while checking video recording
+   * status, since upon exit the status would be undefined anyway. So, grab
+   * a lock before calling this method if you care about persisting a
+   * defined video recording status.
+   * Return:
+   *  true if video recording is enabled, or false if it is disabled.
+   */
+  inline bool isVideoRecordingEnabled() { return mVideoRecEnabled; }
 
-    /****************************************************************************
-     * Public API
-     ***************************************************************************/
+  /****************************************************************************
+   * Public API
+   ***************************************************************************/
 
-public:
-    /* Resets the callback notifier. */
-    void cleanupCBNotifier();
+ public:
+  /* Resets the callback notifier. */
+  void cleanupCBNotifier();
 
-    /* Next frame is available in the camera device.
-     * This is a notification callback that is invoked by the camera device when
-     * a new frame is available.
-     * Note that most likely this method is called in context of a worker thread
-     * that camera device has created for frame capturing.
-     * Param:
-     *  frame - Captured frame, or NULL if camera device didn't pull the frame
-     *      yet. If NULL is passed in this parameter use GetCurrentFrame method
-     *      of the camera device class to obtain the next frame. Also note that
-     *      the size of the frame that is passed here (as well as the frame
-     *      returned from the GetCurrentFrame method) is defined by the current
-     *      frame settings (width + height + pixel format) for the camera device.
-     * timestamp - Frame's timestamp.
-     * camera_dev - Camera device instance that delivered the frame.
-     */
-    void onNextFrameAvailable(const void* frame,
-                              nsecs_t timestamp,
-                              EmulatedCameraDevice* camera_dev);
+  /* Next frame is available in the camera device.
+   * This is a notification callback that is invoked by the camera device when
+   * a new frame is available.
+   * Note that this method is most likely called in the context of a worker
+   * thread that the camera device has created for frame capturing.
+   * Param:
+   *  frame - Captured frame, or NULL if camera device didn't pull the frame
+   *      yet. If NULL is passed in this parameter use GetCurrentFrame method
+   *      of the camera device class to obtain the next frame. Also note that
+   *      the size of the frame that is passed here (as well as the frame
+   *      returned from the GetCurrentFrame method) is defined by the current
+   *      frame settings (width + height + pixel format) for the camera device.
+   * timestamp - Frame's timestamp.
+   * camera_dev - Camera device instance that delivered the frame.
+   */
+  void onNextFrameAvailable(const void* frame, nsecs_t timestamp,
+                            EmulatedCameraDevice* camera_dev);
 
-    /* Entry point for notifications that occur in camera device.
-     * Param:
-     *  err - CAMERA_ERROR_XXX error code.
-     */
-    void onCameraDeviceError(int err);
+  /* Entry point for notifications that occur in camera device.
+   * Param:
+   *  err - CAMERA_ERROR_XXX error code.
+   */
+  void onCameraDeviceError(int err);
 
-    /* Reports focus operation completion to camera client.
-     */
-    void onCameraFocusAcquired();
+  /* Reports focus operation completion to camera client.
+   */
+  void onCameraFocusAcquired();
 
-    /* Sets, or resets taking picture state.
-     * This state control whether or not to notify the framework about compressed
-     * image, shutter, and other picture related events.
-     */
-    void setTakingPicture(bool taking)
-    {
-        mTakingPicture = taking;
-    }
+  /* Sets or resets the taking-picture state.
+   * This state controls whether or not to notify the framework about
+   * compressed image, shutter, and other picture-related events.
+   */
+  void setTakingPicture(bool taking) { mTakingPicture = taking; }
 
-    /* Sets JPEG quality used to compress frame during picture taking. */
-    void setJpegQuality(int jpeg_quality)
-    {
-        mJpegQuality = jpeg_quality;
-    }
+  /* Sets JPEG quality used to compress frame during picture taking. */
+  void setJpegQuality(int jpeg_quality) { mJpegQuality = jpeg_quality; }
 
-    /****************************************************************************
-     * Private API
-     ***************************************************************************/
+  /****************************************************************************
+   * Private API
+   ***************************************************************************/
 
-protected:
-    /* Checks if it's time to push new video frame.
-     * Note that this method must be called while object is locked.
-     * Param:
-     *  timestamp - Timestamp for the new frame. */
-    bool isNewVideoFrameTime(nsecs_t timestamp);
+ protected:
+  /* Checks if it's time to push new video frame.
+   * Note that this method must be called while object is locked.
+   * Param:
+   *  timestamp - Timestamp for the new frame. */
+  bool isNewVideoFrameTime(nsecs_t timestamp);
 
-    /****************************************************************************
-     * Data members
-     ***************************************************************************/
+  /****************************************************************************
+   * Data members
+   ***************************************************************************/
 
-protected:
-    /* Locks this instance for data change. */
-    Mutex                           mObjectLock;
+ protected:
+  /* Locks this instance for data change. */
+  Mutex mObjectLock;
 
-    /*
-     * Callbacks, registered in set_callbacks.
-     */
+  /*
+   * Callbacks, registered in set_callbacks.
+   */
 
-    camera_notify_callback          mNotifyCB;
-    camera_data_callback            mDataCB;
-    camera_data_timestamp_callback  mDataCBTimestamp;
-    camera_request_memory           mGetMemoryCB;
-    void*                           mCBOpaque;
+  camera_notify_callback mNotifyCB;
+  camera_data_callback mDataCB;
+  camera_data_timestamp_callback mDataCBTimestamp;
+  camera_request_memory mGetMemoryCB;
+  void* mCBOpaque;
 
-    /* video frame queue for the CameraHeapMemory destruction */
-    List<camera_memory_t*>          mCameraMemoryTs;
+  /* video frame queue for the CameraHeapMemory destruction */
+  List<camera_memory_t*> mCameraMemoryTs;
 
-    /* Timestamp when last frame has been delivered to the framework. */
-    nsecs_t                         mLastFrameTimestamp;
+  /* Timestamp when last frame has been delivered to the framework. */
+  nsecs_t mLastFrameTimestamp;
 
-    /* Video frequency in nanosec. */
-    nsecs_t                         mFrameRefreshFreq;
+  /* Video frequency in nanosec. */
+  nsecs_t mFrameRefreshFreq;
 
-    /* Message enabler. */
-    uint32_t                        mMessageEnabler;
+  /* Message enabler. */
+  uint32_t mMessageEnabler;
 
-    /* JPEG quality used to compress frame during picture taking. */
-    int                             mJpegQuality;
+  /* JPEG quality used to compress frame during picture taking. */
+  int mJpegQuality;
 
-    /* Video recording status. */
-    bool                            mVideoRecEnabled;
+  /* Video recording status. */
+  bool mVideoRecEnabled;
 
-    /* Picture taking status. */
-    bool                            mTakingPicture;
+  /* Picture taking status. */
+  bool mTakingPicture;
 };
 
 }; /* namespace android */
 
-#endif  /* HW_EMULATOR_CAMERA_CALLBACK_NOTIFIER_H */
+#endif /* HW_EMULATOR_CAMERA_CALLBACK_NOTIFIER_H */
diff --git a/guest/hals/camera/CameraConfiguration.cpp b/guest/hals/camera/CameraConfiguration.cpp
index 8023045..ed16089 100644
--- a/guest/hals/camera/CameraConfiguration.cpp
+++ b/guest/hals/camera/CameraConfiguration.cpp
@@ -97,9 +97,8 @@
 const char* const kCameraDefinitionResolutionHeightKey = "height";
 
 // Convert string value to camera orientation.
-bool ValueToCameraOrientation(
-    const std::string& value,
-    CameraDefinition::Orientation* orientation) {
+bool ValueToCameraOrientation(const std::string& value,
+                              CameraDefinition::Orientation* orientation) {
   if (value == "back") {
     *orientation = CameraDefinition::kBack;
     return true;
@@ -107,15 +106,13 @@
     *orientation = CameraDefinition::kFront;
     return true;
   }
-  ALOGE("%s: Invalid camera orientation: %s.",
-        __FUNCTION__, value.c_str());
+  ALOGE("%s: Invalid camera orientation: %s.", __FUNCTION__, value.c_str());
   return false;
 }
 
 // Convert string value to camera HAL version.
-bool ValueToCameraHalVersion(
-    const std::string& value,
-    CameraDefinition::HalVersion* hal_version) {
+bool ValueToCameraHalVersion(const std::string& value,
+                             CameraDefinition::HalVersion* hal_version) {
   int temp;
   char* endptr;
 
@@ -148,9 +145,9 @@
   return true;
 }
 
-bool ValueToCameraResolution(
-    const std::string& width, const std::string& height,
-    CameraDefinition::Resolution* resolution) {
+bool ValueToCameraResolution(const std::string& width,
+                             const std::string& height,
+                             CameraDefinition::Resolution* resolution) {
   char* endptr;
 
   resolution->width = strtol(width.c_str(), &endptr, 10);
@@ -176,10 +173,11 @@
 
   // Validate width and height divisible by 8.
   if ((resolution->width & 7) != 0 || (resolution->height & 7) != 0) {
-    ALOGE("%s: Invalid camera resolution: width and height must be "
-          "divisible by 8, got %dx%d (%dx%d).", __FUNCTION__,
-          resolution->width, resolution->height,
-          resolution->width & 7, resolution->height & 7);
+    ALOGE(
+        "%s: Invalid camera resolution: width and height must be "
+        "divisible by 8, got %dx%d (%dx%d).",
+        __FUNCTION__, resolution->width, resolution->height,
+        resolution->width & 7, resolution->height & 7);
     return false;
   }
 
@@ -188,9 +186,8 @@
 
 // Process camera definitions.
 // Returns true, if definitions were sane.
-bool ConfigureCameras(
-    const Json::Value& value,
-    std::vector<CameraDefinition>* cameras) {
+bool ConfigureCameras(const Json::Value& value,
+                      std::vector<CameraDefinition>* cameras) {
   if (!value.isObject()) {
     ALOGE("%s: Configuration root is not an object", __FUNCTION__);
     return false;
@@ -198,8 +195,7 @@
 
   if (!value.isMember(kCameraDefinitionsKey)) return true;
   for (Json::ValueConstIterator iter = value[kCameraDefinitionsKey].begin();
-       iter != value[kCameraDefinitionsKey].end();
-       ++iter) {
+       iter != value[kCameraDefinitionsKey].end(); ++iter) {
     cameras->push_back(CameraDefinition());
     CameraDefinition& camera = cameras->back();
 
@@ -210,30 +206,32 @@
 
     // Camera without orientation -> invalid setting.
     if (!iter->isMember(kCameraDefinitionOrientationKey)) {
-      ALOGE("%s: Invalid camera definition: key %s is missing.",
-            __FUNCTION__, kCameraDefinitionOrientationKey);
+      ALOGE("%s: Invalid camera definition: key %s is missing.", __FUNCTION__,
+            kCameraDefinitionOrientationKey);
       return false;
     }
 
     if (!ValueToCameraOrientation(
-        (*iter)[kCameraDefinitionOrientationKey].asString(),
-        &camera.orientation)) return false;
+            (*iter)[kCameraDefinitionOrientationKey].asString(),
+            &camera.orientation))
+      return false;
 
     // Camera without HAL version -> invalid setting.
     if (!(*iter).isMember(kCameraDefinitionHalVersionKey)) {
-      ALOGE("%s: Invalid camera definition: key %s is missing.",
-            __FUNCTION__, kCameraDefinitionHalVersionKey);
+      ALOGE("%s: Invalid camera definition: key %s is missing.", __FUNCTION__,
+            kCameraDefinitionHalVersionKey);
       return false;
     }
 
     if (!ValueToCameraHalVersion(
-        (*iter)[kCameraDefinitionHalVersionKey].asString(),
-        &camera.hal_version)) return false;
+            (*iter)[kCameraDefinitionHalVersionKey].asString(),
+            &camera.hal_version))
+      return false;
 
     // Camera without resolutions -> invalid setting.
     if (!iter->isMember(kCameraDefinitionResolutionsKey)) {
-      ALOGE("%s: Invalid camera definition: key %s is missing.",
-            __FUNCTION__, kCameraDefinitionResolutionsKey);
+      ALOGE("%s: Invalid camera definition: key %s is missing.", __FUNCTION__,
+            kCameraDefinitionResolutionsKey);
       return false;
     }
 
@@ -249,8 +247,7 @@
 
     // Process all resolutions.
     for (Json::ValueConstIterator json_res_iter = json_resolutions.begin();
-         json_res_iter != json_resolutions.end();
-         ++json_res_iter) {
+         json_res_iter != json_resolutions.end(); ++json_res_iter) {
       // Check presence of width and height keys.
       if (!json_res_iter->isObject()) {
         ALOGE("%s: Camera resolution item is not an object", __FUNCTION__);
@@ -258,10 +255,10 @@
       }
       if (!json_res_iter->isMember(kCameraDefinitionResolutionWidthKey) ||
           !json_res_iter->isMember(kCameraDefinitionResolutionHeightKey)) {
-        ALOGE("%s: Invalid camera resolution: keys %s and %s are both required.",
-              __FUNCTION__,
-              kCameraDefinitionResolutionWidthKey,
-              kCameraDefinitionResolutionHeightKey);
+        ALOGE(
+            "%s: Invalid camera resolution: keys %s and %s are both required.",
+            __FUNCTION__, kCameraDefinitionResolutionWidthKey,
+            kCameraDefinitionResolutionHeightKey);
         return false;
       }
 
@@ -269,9 +266,10 @@
       CameraDefinition::Resolution& resolution = camera.resolutions.back();
 
       if (!ValueToCameraResolution(
-          (*json_res_iter)[kCameraDefinitionResolutionWidthKey].asString(),
-          (*json_res_iter)[kCameraDefinitionResolutionHeightKey].asString(),
-          &resolution)) return false;
+              (*json_res_iter)[kCameraDefinitionResolutionWidthKey].asString(),
+              (*json_res_iter)[kCameraDefinitionResolutionHeightKey].asString(),
+              &resolution))
+        return false;
     }
   }
 
@@ -282,10 +280,9 @@
 bool CameraConfiguration::Init() {
   cameras_.clear();
   std::string config;
-  if (!android::base::ReadFileToString(
-      kConfigurationFileLocation, &config)) {
-    ALOGE("%s: Could not open configuration file: %s",
-          __FUNCTION__, kConfigurationFileLocation);
+  if (!android::base::ReadFileToString(kConfigurationFileLocation, &config)) {
+    ALOGE("%s: Could not open configuration file: %s", __FUNCTION__,
+          kConfigurationFileLocation);
     return false;
   }
 
@@ -301,5 +298,3 @@
 }
 
 }  // namespace cvd
-
-
diff --git a/guest/hals/camera/CameraConfiguration.h b/guest/hals/camera/CameraConfiguration.h
index 90aa087..9058648 100644
--- a/guest/hals/camera/CameraConfiguration.h
+++ b/guest/hals/camera/CameraConfiguration.h
@@ -23,17 +23,10 @@
 // Camera properties and features.
 struct CameraDefinition {
   // Camera facing direction.
-  enum Orientation {
-    kFront,
-    kBack
-  };
+  enum Orientation { kFront, kBack };
 
   // Camera recognized HAL versions.
-  enum HalVersion {
-    kHalV1,
-    kHalV2,
-    kHalV3
-  };
+  enum HalVersion { kHalV1, kHalV2, kHalV3 };
 
   struct Resolution {
     int width;
@@ -50,9 +43,7 @@
   CameraConfiguration() {}
   ~CameraConfiguration() {}
 
-  const std::vector<CameraDefinition>& cameras() const {
-    return cameras_;
-  }
+  const std::vector<CameraDefinition>& cameras() const { return cameras_; }
 
   bool Init();
 
diff --git a/guest/hals/camera/Converters.cpp b/guest/hals/camera/Converters.cpp
index f63f67f..fdbb57d 100644
--- a/guest/hals/camera/Converters.cpp
+++ b/guest/hals/camera/Converters.cpp
@@ -20,154 +20,131 @@
 
 #define LOG_NDEBUG 0
 #define LOG_TAG "EmulatedCamera_Converter"
-#include <cutils/log.h>
 #include "Converters.h"
+#include <cutils/log.h>
 
 namespace android {
 
-static void _YUV420SToRGB565(const uint8_t* Y,
-                             const uint8_t* U,
-                             const uint8_t* V,
-                             int dUV,
-                             uint16_t* rgb,
-                             int width,
-                             int height)
-{
-    const uint8_t* U_pos = U;
-    const uint8_t* V_pos = V;
+static void _YUV420SToRGB565(const uint8_t* Y, const uint8_t* U,
+                             const uint8_t* V, int dUV, uint16_t* rgb,
+                             int width, int height) {
+  const uint8_t* U_pos = U;
+  const uint8_t* V_pos = V;
 
-    for (int y = 0; y < height; y++) {
-        for (int x = 0; x < width; x += 2, U += dUV, V += dUV) {
-            const uint8_t nU = *U;
-            const uint8_t nV = *V;
-            *rgb = YUVToRGB565(*Y, nU, nV);
-            Y++; rgb++;
-            *rgb = YUVToRGB565(*Y, nU, nV);
-            Y++; rgb++;
-        }
-        if (y & 0x1) {
-            U_pos = U;
-            V_pos = V;
-        } else {
-            U = U_pos;
-            V = V_pos;
-        }
+  for (int y = 0; y < height; y++) {
+    for (int x = 0; x < width; x += 2, U += dUV, V += dUV) {
+      const uint8_t nU = *U;
+      const uint8_t nV = *V;
+      *rgb = YUVToRGB565(*Y, nU, nV);
+      Y++;
+      rgb++;
+      *rgb = YUVToRGB565(*Y, nU, nV);
+      Y++;
+      rgb++;
     }
-}
-
-static void _YUV420SToRGB32(const uint8_t* Y,
-                            const uint8_t* U,
-                            const uint8_t* V,
-                            int dUV,
-                            uint32_t* rgb,
-                            int width,
-                            int height)
-{
-    const uint8_t* U_pos = U;
-    const uint8_t* V_pos = V;
-
-    for (int y = 0; y < height; y++) {
-        for (int x = 0; x < width; x += 2, U += dUV, V += dUV) {
-            const uint8_t nU = *U;
-            const uint8_t nV = *V;
-            *rgb = YUVToRGB32(*Y, nU, nV);
-            Y++; rgb++;
-            *rgb = YUVToRGB32(*Y, nU, nV);
-            Y++; rgb++;
-        }
-        if (y & 0x1) {
-            U_pos = U;
-            V_pos = V;
-        } else {
-            U = U_pos;
-            V = V_pos;
-        }
+    if (y & 0x1) {
+      U_pos = U;
+      V_pos = V;
+    } else {
+      U = U_pos;
+      V = V_pos;
     }
+  }
 }
 
-void YV12ToRGB565(const void* yv12, void* rgb, int width, int height)
-{
-    const int pix_total = width * height;
-    const uint8_t* Y = reinterpret_cast<const uint8_t*>(yv12);
-    const uint8_t* U = Y + pix_total;
-    const uint8_t* V = U + pix_total / 4;
-    _YUV420SToRGB565(Y, U, V, 1, reinterpret_cast<uint16_t*>(rgb), width, height);
+static void _YUV420SToRGB32(const uint8_t* Y, const uint8_t* U,
+                            const uint8_t* V, int dUV, uint32_t* rgb, int width,
+                            int height) {
+  const uint8_t* U_pos = U;
+  const uint8_t* V_pos = V;
+
+  for (int y = 0; y < height; y++) {
+    for (int x = 0; x < width; x += 2, U += dUV, V += dUV) {
+      const uint8_t nU = *U;
+      const uint8_t nV = *V;
+      *rgb = YUVToRGB32(*Y, nU, nV);
+      Y++;
+      rgb++;
+      *rgb = YUVToRGB32(*Y, nU, nV);
+      Y++;
+      rgb++;
+    }
+    if (y & 0x1) {
+      U_pos = U;
+      V_pos = V;
+    } else {
+      U = U_pos;
+      V = V_pos;
+    }
+  }
 }
 
-void YV12ToRGB32(const void* yv12, void* rgb, int width, int height)
-{
-    const int pix_total = width * height;
-    const uint8_t* Y = reinterpret_cast<const uint8_t*>(yv12);
-    const uint8_t* V = Y + pix_total;
-    const uint8_t* U = V + pix_total / 4;
-    _YUV420SToRGB32(Y, U, V, 1, reinterpret_cast<uint32_t*>(rgb), width, height);
+void YV12ToRGB565(const void* yv12, void* rgb, int width, int height) {
+  const int pix_total = width * height;
+  const uint8_t* Y = reinterpret_cast<const uint8_t*>(yv12);
+  const uint8_t* U = Y + pix_total;
+  const uint8_t* V = U + pix_total / 4;
+  _YUV420SToRGB565(Y, U, V, 1, reinterpret_cast<uint16_t*>(rgb), width, height);
 }
 
-void YU12ToRGB32(const void* yu12, void* rgb, int width, int height)
-{
-    const int pix_total = width * height;
-    const uint8_t* Y = reinterpret_cast<const uint8_t*>(yu12);
-    const uint8_t* U = Y + pix_total;
-    const uint8_t* V = U + pix_total / 4;
-    _YUV420SToRGB32(Y, U, V, 1, reinterpret_cast<uint32_t*>(rgb), width, height);
+void YV12ToRGB32(const void* yv12, void* rgb, int width, int height) {
+  const int pix_total = width * height;
+  const uint8_t* Y = reinterpret_cast<const uint8_t*>(yv12);
+  const uint8_t* V = Y + pix_total;
+  const uint8_t* U = V + pix_total / 4;
+  _YUV420SToRGB32(Y, U, V, 1, reinterpret_cast<uint32_t*>(rgb), width, height);
+}
+
+void YU12ToRGB32(const void* yu12, void* rgb, int width, int height) {
+  const int pix_total = width * height;
+  const uint8_t* Y = reinterpret_cast<const uint8_t*>(yu12);
+  const uint8_t* U = Y + pix_total;
+  const uint8_t* V = U + pix_total / 4;
+  _YUV420SToRGB32(Y, U, V, 1, reinterpret_cast<uint32_t*>(rgb), width, height);
 }
 
 /* Common converter for YUV 4:2:0 interleaved to RGB565.
  * y, u, and v point to Y,U, and V panes, where U and V values are interleaved.
  */
-static void _NVXXToRGB565(const uint8_t* Y,
-                          const uint8_t* U,
-                          const uint8_t* V,
-                          uint16_t* rgb,
-                          int width,
-                          int height)
-{
-    _YUV420SToRGB565(Y, U, V, 2, rgb, width, height);
+static void _NVXXToRGB565(const uint8_t* Y, const uint8_t* U, const uint8_t* V,
+                          uint16_t* rgb, int width, int height) {
+  _YUV420SToRGB565(Y, U, V, 2, rgb, width, height);
 }
 
 /* Common converter for YUV 4:2:0 interleaved to RGB32.
  * y, u, and v point to Y,U, and V panes, where U and V values are interleaved.
  */
-static void _NVXXToRGB32(const uint8_t* Y,
-                         const uint8_t* U,
-                         const uint8_t* V,
-                         uint32_t* rgb,
-                         int width,
-                         int height)
-{
-    _YUV420SToRGB32(Y, U, V, 2, rgb, width, height);
+static void _NVXXToRGB32(const uint8_t* Y, const uint8_t* U, const uint8_t* V,
+                         uint32_t* rgb, int width, int height) {
+  _YUV420SToRGB32(Y, U, V, 2, rgb, width, height);
 }
 
-void NV12ToRGB565(const void* nv12, void* rgb, int width, int height)
-{
-    const int pix_total = width * height;
-    const uint8_t* y = reinterpret_cast<const uint8_t*>(nv12);
-    _NVXXToRGB565(y, y + pix_total, y + pix_total + 1,
-                  reinterpret_cast<uint16_t*>(rgb), width, height);
+void NV12ToRGB565(const void* nv12, void* rgb, int width, int height) {
+  const int pix_total = width * height;
+  const uint8_t* y = reinterpret_cast<const uint8_t*>(nv12);
+  _NVXXToRGB565(y, y + pix_total, y + pix_total + 1,
+                reinterpret_cast<uint16_t*>(rgb), width, height);
 }
 
-void NV12ToRGB32(const void* nv12, void* rgb, int width, int height)
-{
-    const int pix_total = width * height;
-    const uint8_t* y = reinterpret_cast<const uint8_t*>(nv12);
-    _NVXXToRGB32(y, y + pix_total, y + pix_total + 1,
-                 reinterpret_cast<uint32_t*>(rgb), width, height);
+void NV12ToRGB32(const void* nv12, void* rgb, int width, int height) {
+  const int pix_total = width * height;
+  const uint8_t* y = reinterpret_cast<const uint8_t*>(nv12);
+  _NVXXToRGB32(y, y + pix_total, y + pix_total + 1,
+               reinterpret_cast<uint32_t*>(rgb), width, height);
 }
 
-void NV21ToRGB565(const void* nv21, void* rgb, int width, int height)
-{
-    const int pix_total = width * height;
-    const uint8_t* y = reinterpret_cast<const uint8_t*>(nv21);
-    _NVXXToRGB565(y, y + pix_total + 1, y + pix_total,
-                  reinterpret_cast<uint16_t*>(rgb), width, height);
+void NV21ToRGB565(const void* nv21, void* rgb, int width, int height) {
+  const int pix_total = width * height;
+  const uint8_t* y = reinterpret_cast<const uint8_t*>(nv21);
+  _NVXXToRGB565(y, y + pix_total + 1, y + pix_total,
+                reinterpret_cast<uint16_t*>(rgb), width, height);
 }
 
-void NV21ToRGB32(const void* nv21, void* rgb, int width, int height)
-{
-    const int pix_total = width * height;
-    const uint8_t* y = reinterpret_cast<const uint8_t*>(nv21);
-    _NVXXToRGB32(y, y + pix_total + 1, y + pix_total,
-                 reinterpret_cast<uint32_t*>(rgb), width, height);
+void NV21ToRGB32(const void* nv21, void* rgb, int width, int height) {
+  const int pix_total = width * height;
+  const uint8_t* y = reinterpret_cast<const uint8_t*>(nv21);
+  _NVXXToRGB32(y, y + pix_total + 1, y + pix_total,
+               reinterpret_cast<uint32_t*>(rgb), width, height);
 }
 
 }; /* namespace android */
diff --git a/guest/hals/camera/Converters.h b/guest/hals/camera/Converters.h
index 13e2a85..9d7f6a9 100644
--- a/guest/hals/camera/Converters.h
+++ b/guest/hals/camera/Converters.h
@@ -46,32 +46,32 @@
  */
 
 #if __BYTE_ORDER == __LITTLE_ENDIAN
-static const uint16_t kRed5     = 0x001f;
-static const uint16_t kGreen6   = 0x07e0;
-static const uint16_t kBlue5    = 0xf800;
+static const uint16_t kRed5 = 0x001f;
+static const uint16_t kGreen6 = 0x07e0;
+static const uint16_t kBlue5 = 0xf800;
 #else   // __BYTE_ORDER
-static const uint16_t kRed5     = 0xf800;
-static const uint16_t kGreen6   = 0x07e0;
-static const uint16_t kBlue5    = 0x001f;
+static const uint16_t kRed5 = 0xf800;
+static const uint16_t kGreen6 = 0x07e0;
+static const uint16_t kBlue5 = 0x001f;
 #endif  // __BYTE_ORDER
-static const uint32_t kBlack16  = 0x0000;
-static const uint32_t kWhite16  = kRed5 | kGreen6 | kBlue5;
+static const uint32_t kBlack16 = 0x0000;
+static const uint32_t kWhite16 = kRed5 | kGreen6 | kBlue5;
 
 /*
  * RGB32 color masks
  */
 
 #if __BYTE_ORDER == __LITTLE_ENDIAN
-static const uint32_t kRed8     = 0x000000ff;
-static const uint32_t kGreen8   = 0x0000ff00;
-static const uint32_t kBlue8    = 0x00ff0000;
+static const uint32_t kRed8 = 0x000000ff;
+static const uint32_t kGreen8 = 0x0000ff00;
+static const uint32_t kBlue8 = 0x00ff0000;
 #else   // __BYTE_ORDER
-static const uint32_t kRed8     = 0x00ff0000;
-static const uint32_t kGreen8   = 0x0000ff00;
-static const uint32_t kBlue8    = 0x000000ff;
+static const uint32_t kRed8 = 0x00ff0000;
+static const uint32_t kGreen8 = 0x0000ff00;
+static const uint32_t kBlue8 = 0x000000ff;
 #endif  // __BYTE_ORDER
-static const uint32_t kBlack32  = 0x00000000;
-static const uint32_t kWhite32  = kRed8 | kGreen8 | kBlue8;
+static const uint32_t kBlack32 = 0x00000000;
+static const uint32_t kWhite32 = kRed8 | kGreen8 | kBlue8;
 
 /*
  * Extracting, and saving color bytes from / to WORD / DWORD RGB.
@@ -79,62 +79,76 @@
 
 #if __BYTE_ORDER == __LITTLE_ENDIAN
 /* Extract red, green, and blue bytes from RGB565 word. */
-#define R16(rgb)    static_cast<uint8_t>(rgb & kRed5)
-#define G16(rgb)    static_cast<uint8_t>((rgb & kGreen6) >> 5)
-#define B16(rgb)    static_cast<uint8_t>((rgb & kBlue5) >> 11)
+#define R16(rgb) static_cast<uint8_t>(rgb & kRed5)
+#define G16(rgb) static_cast<uint8_t>((rgb & kGreen6) >> 5)
+#define B16(rgb) static_cast<uint8_t>((rgb & kBlue5) >> 11)
 /* Make 8 bits red, green, and blue, extracted from RGB565 word. */
-#define R16_32(rgb) static_cast<uint8_t>(((rgb & kRed5) << 3) | ((rgb & kRed5) >> 2))
-#define G16_32(rgb) static_cast<uint8_t>(((rgb & kGreen6) >> 3) | ((rgb & kGreen6) >> 9))
-#define B16_32(rgb) static_cast<uint8_t>(((rgb & kBlue5) >> 8) | ((rgb & kBlue5) >> 14))
+#define R16_32(rgb) \
+  static_cast<uint8_t>(((rgb & kRed5) << 3) | ((rgb & kRed5) >> 2))
+#define G16_32(rgb) \
+  static_cast<uint8_t>(((rgb & kGreen6) >> 3) | ((rgb & kGreen6) >> 9))
+#define B16_32(rgb) \
+  static_cast<uint8_t>(((rgb & kBlue5) >> 8) | ((rgb & kBlue5) >> 14))
 /* Extract red, green, and blue bytes from RGB32 dword. */
-#define R32(rgb)    static_cast<uint8_t>(rgb & kRed8)
-#define G32(rgb)    static_cast<uint8_t>(((rgb & kGreen8) >> 8) & 0xff)
-#define B32(rgb)    static_cast<uint8_t>(((rgb & kBlue8) >> 16) & 0xff)
+#define R32(rgb) static_cast<uint8_t>(rgb & kRed8)
+#define G32(rgb) static_cast<uint8_t>(((rgb & kGreen8) >> 8) & 0xff)
+#define B32(rgb) static_cast<uint8_t>(((rgb & kBlue8) >> 16) & 0xff)
 /* Build RGB565 word from red, green, and blue bytes. */
-#define RGB565(r, g, b) static_cast<uint16_t>((((static_cast<uint16_t>(b) << 6) | g) << 5) | r)
+#define RGB565(r, g, b) \
+  static_cast<uint16_t>((((static_cast<uint16_t>(b) << 6) | g) << 5) | r)
 /* Build RGB32 dword from red, green, and blue bytes. */
-#define RGB32(r, g, b) static_cast<uint32_t>((((static_cast<uint32_t>(b) << 8) | g) << 8) | r)
-#else   // __BYTE_ORDER
+#define RGB32(r, g, b) \
+  static_cast<uint32_t>((((static_cast<uint32_t>(b) << 8) | g) << 8) | r)
+#else  // __BYTE_ORDER
 /* Extract red, green, and blue bytes from RGB565 word. */
-#define R16(rgb)    static_cast<uint8_t>((rgb & kRed5) >> 11)
-#define G16(rgb)    static_cast<uint8_t>((rgb & kGreen6) >> 5)
-#define B16(rgb)    static_cast<uint8_t>(rgb & kBlue5)
+#define R16(rgb) static_cast<uint8_t>((rgb & kRed5) >> 11)
+#define G16(rgb) static_cast<uint8_t>((rgb & kGreen6) >> 5)
+#define B16(rgb) static_cast<uint8_t>(rgb & kBlue5)
 /* Make 8 bits red, green, and blue, extracted from RGB565 word. */
-#define R16_32(rgb) static_cast<uint8_t>(((rgb & kRed5) >> 8) | ((rgb & kRed5) >> 14))
-#define G16_32(rgb) static_cast<uint8_t>(((rgb & kGreen6) >> 3) | ((rgb & kGreen6) >> 9))
-#define B16_32(rgb) static_cast<uint8_t>(((rgb & kBlue5) << 3) | ((rgb & kBlue5) >> 2))
+#define R16_32(rgb) \
+  static_cast<uint8_t>(((rgb & kRed5) >> 8) | ((rgb & kRed5) >> 14))
+#define G16_32(rgb) \
+  static_cast<uint8_t>(((rgb & kGreen6) >> 3) | ((rgb & kGreen6) >> 9))
+#define B16_32(rgb) \
+  static_cast<uint8_t>(((rgb & kBlue5) << 3) | ((rgb & kBlue5) >> 2))
 /* Extract red, green, and blue bytes from RGB32 dword. */
-#define R32(rgb)    static_cast<uint8_t>((rgb & kRed8) >> 16)
-#define G32(rgb)    static_cast<uint8_t>((rgb & kGreen8) >> 8)
-#define B32(rgb)    static_cast<uint8_t>(rgb & kBlue8)
+#define R32(rgb) static_cast<uint8_t>((rgb & kRed8) >> 16)
+#define G32(rgb) static_cast<uint8_t>((rgb & kGreen8) >> 8)
+#define B32(rgb) static_cast<uint8_t>(rgb & kBlue8)
 /* Build RGB565 word from red, green, and blue bytes. */
-#define RGB565(r, g, b) static_cast<uint16_t>((((static_cast<uint16_t>(r) << 6) | g) << 5) | b)
+#define RGB565(r, g, b) \
+  static_cast<uint16_t>((((static_cast<uint16_t>(r) << 6) | g) << 5) | b)
 /* Build RGB32 dword from red, green, and blue bytes. */
-#define RGB32(r, g, b) static_cast<uint32_t>((((static_cast<uint32_t>(r) << 8) | g) << 8) | b)
+#define RGB32(r, g, b) \
+  static_cast<uint32_t>((((static_cast<uint32_t>(r) << 8) | g) << 8) | b)
 #endif  // __BYTE_ORDER
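
A quick sanity check of the little-endian branch above (a sketch, not part
of the change; it assumes kRed5 = 0x001f, kGreen6 = 0x07e0, and kBlue5 =
0xf800 in this branch, which is what the shifts imply):

    const uint16_t px = RGB565(31, 42, 16);    // -> 0x855f
    // B16(px)    == 16:   (0x855f & 0xf800) >> 11
    // R16_32(px) == 0xff: (31 << 3) | (31 >> 2), replicating the top red
    //                     bits to widen the 5-bit channel to a full byte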
 
-/* An union that simplifies breaking 32 bit RGB into separate R, G, and B colors.
+/* A union that simplifies breaking 32-bit RGB into separate R, G, and B
+ * colors.
  */
 typedef union RGB32_t {
-    uint32_t    color;
-    struct {
+  uint32_t color;
+  struct {
 #if __BYTE_ORDER == __LITTLE_ENDIAN
-        uint8_t r; uint8_t g; uint8_t b; uint8_t a;
+    uint8_t r;
+    uint8_t g;
+    uint8_t b;
+    uint8_t a;
 #else   // __BYTE_ORDER
-        uint8_t a; uint8_t b; uint8_t g; uint8_t r;
+    uint8_t a;
+    uint8_t b;
+    uint8_t g;
+    uint8_t r;
 #endif  // __BYTE_ORDER
-    };
+  };
 } RGB32_t;
 
-
 /* Clips a value to the unsigned 0-255 range, treating negative values as zero.
  */
-static __inline__ int
-clamp(int x)
-{
-    if (x > 255) return 255;
-    if (x < 0)   return 0;
-    return x;
+static __inline__ int clamp(int x) {
+  if (x > 255) return 255;
+  if (x < 0) return 0;
+  return x;
 }
 
 /********************************************************************************
@@ -144,33 +158,33 @@
 /*
  * RGB -> YUV conversion macros
  */
-#define RGB2Y(r, g, b) (uint8_t)(((66 * (r) + 129 * (g) +  25 * (b) + 128) >> 8) +  16)
-#define RGB2U(r, g, b) (uint8_t)(((-38 * (r) - 74 * (g) + 112 * (b) + 128) >> 8) + 128)
-#define RGB2V(r, g, b) (uint8_t)(((112 * (r) - 94 * (g) -  18 * (b) + 128) >> 8) + 128)
+#define RGB2Y(r, g, b) \
+  (uint8_t)(((66 * (r) + 129 * (g) + 25 * (b) + 128) >> 8) + 16)
+#define RGB2U(r, g, b) \
+  (uint8_t)(((-38 * (r)-74 * (g) + 112 * (b) + 128) >> 8) + 128)
+#define RGB2V(r, g, b) \
+  (uint8_t)(((112 * (r)-94 * (g)-18 * (b) + 128) >> 8) + 128)
 
 /* Converts R8 G8 B8 color to YUV. */
-static __inline__ void
-R8G8B8ToYUV(uint8_t r, uint8_t g, uint8_t b, uint8_t* y, uint8_t* u, uint8_t* v)
-{
-    *y = RGB2Y((int)r, (int)g, (int)b);
-    *u = RGB2U((int)r, (int)g, (int)b);
-    *v = RGB2V((int)r, (int)g, (int)b);
+static __inline__ void R8G8B8ToYUV(uint8_t r, uint8_t g, uint8_t b, uint8_t* y,
+                                   uint8_t* u, uint8_t* v) {
+  *y = RGB2Y((int)r, (int)g, (int)b);
+  *u = RGB2U((int)r, (int)g, (int)b);
+  *v = RGB2V((int)r, (int)g, (int)b);
 }
 
 /* Converts RGB565 color to YUV. */
-static __inline__ void
-RGB565ToYUV(uint16_t rgb, uint8_t* y, uint8_t* u, uint8_t* v)
-{
-    R8G8B8ToYUV(R16_32(rgb), G16_32(rgb), B16_32(rgb), y, u, v);
+static __inline__ void RGB565ToYUV(uint16_t rgb, uint8_t* y, uint8_t* u,
+                                   uint8_t* v) {
+  R8G8B8ToYUV(R16_32(rgb), G16_32(rgb), B16_32(rgb), y, u, v);
 }
 
 /* Converts RGB32 color to YUV. */
-static __inline__ void
-RGB32ToYUV(uint32_t rgb, uint8_t* y, uint8_t* u, uint8_t* v)
-{
-    RGB32_t rgb_c;
-    rgb_c.color = rgb;
-    R8G8B8ToYUV(rgb_c.r, rgb_c.g, rgb_c.b, y, u, v);
+static __inline__ void RGB32ToYUV(uint32_t rgb, uint8_t* y, uint8_t* u,
+                                  uint8_t* v) {
+  RGB32_t rgb_c;
+  rgb_c.color = rgb;
+  R8G8B8ToYUV(rgb_c.r, rgb_c.g, rgb_c.b, y, u, v);
 }
 
 /********************************************************************************
@@ -190,67 +204,59 @@
  *  E = V - 128
  */
 #define YUV2RO(C, D, E) clamp((298 * (C) + 409 * (E) + 128) >> 8)
-#define YUV2GO(C, D, E) clamp((298 * (C) - 100 * (D) - 208 * (E) + 128) >> 8)
+#define YUV2GO(C, D, E) clamp((298 * (C)-100 * (D)-208 * (E) + 128) >> 8)
 #define YUV2BO(C, D, E) clamp((298 * (C) + 516 * (D) + 128) >> 8)
 
 /*
  *  Main macros that take the original Y, U, and V values
  */
 #define YUV2R(y, u, v) clamp((298 * ((y)-16) + 409 * ((v)-128) + 128) >> 8)
-#define YUV2G(y, u, v) clamp((298 * ((y)-16) - 100 * ((u)-128) - 208 * ((v)-128) + 128) >> 8)
+#define YUV2G(y, u, v) \
+  clamp((298 * ((y)-16) - 100 * ((u)-128) - 208 * ((v)-128) + 128) >> 8)
 #define YUV2B(y, u, v) clamp((298 * ((y)-16) + 516 * ((u)-128) + 128) >> 8)
 
-
 /* Converts YUV color to RGB565. */
-static __inline__ uint16_t
-YUVToRGB565(int y, int u, int v)
-{
-    /* Calculate C, D, and E values for the optimized macro. */
-    y -= 16; u -= 128; v -= 128;
-    const uint16_t r = (YUV2RO(y,u,v) >> 3) & 0x1f;
-    const uint16_t g = (YUV2GO(y,u,v) >> 2) & 0x3f;
-    const uint16_t b = (YUV2BO(y,u,v) >> 3) & 0x1f;
-    return RGB565(r, g, b);
+static __inline__ uint16_t YUVToRGB565(int y, int u, int v) {
+  /* Calculate C, D, and E values for the optimized macro. */
+  y -= 16;
+  u -= 128;
+  v -= 128;
+  const uint16_t r = (YUV2RO(y, u, v) >> 3) & 0x1f;
+  const uint16_t g = (YUV2GO(y, u, v) >> 2) & 0x3f;
+  const uint16_t b = (YUV2BO(y, u, v) >> 3) & 0x1f;
+  return RGB565(r, g, b);
 }
 
 /* Converts YUV color to RGB32. */
-static __inline__ uint32_t
-YUVToRGB32(int y, int u, int v)
-{
-    /* Calculate C, D, and E values for the optimized macro. */
-    y -= 16; u -= 128; v -= 128;
-    RGB32_t rgb;
-    rgb.r = YUV2RO(y,u,v) & 0xff;
-    rgb.g = YUV2GO(y,u,v) & 0xff;
-    rgb.b = YUV2BO(y,u,v) & 0xff;
-    return rgb.color;
+static __inline__ uint32_t YUVToRGB32(int y, int u, int v) {
+  /* Calculate C, D, and E values for the optimized macro. */
+  y -= 16;
+  u -= 128;
+  v -= 128;
+  RGB32_t rgb;
+  rgb.r = YUV2RO(y, u, v) & 0xff;
+  rgb.g = YUV2GO(y, u, v) & 0xff;
+  rgb.b = YUV2BO(y, u, v) & 0xff;
+  return rgb.color;
 }
 
 /* YUV pixel descriptor. */
 struct YUVPixel {
-    uint8_t     Y;
-    uint8_t     U;
-    uint8_t     V;
+  uint8_t Y;
+  uint8_t U;
+  uint8_t V;
 
-    inline YUVPixel()
-        : Y(0), U(0), V(0)
-    {
-    }
+  inline YUVPixel() : Y(0), U(0), V(0) {}
 
-    inline explicit YUVPixel(uint16_t rgb565)
-    {
-        RGB565ToYUV(rgb565, &Y, &U, &V);
-    }
+  inline explicit YUVPixel(uint16_t rgb565) { RGB565ToYUV(rgb565, &Y, &U, &V); }
 
-    inline explicit YUVPixel(uint32_t rgb32)
-    {
-        RGB32ToYUV(rgb32, &Y, &U, &V);
-    }
+  inline explicit YUVPixel(uint32_t rgb32) { RGB32ToYUV(rgb32, &Y, &U, &V); }
 
-    inline void get(uint8_t* pY, uint8_t* pU, uint8_t* pV) const
-    {
-        *pY = Y; *pU = U; *pV = V;
-    }
+  inline void get(uint8_t* pY, uint8_t* pU, uint8_t* pV) const {
+    *pY = Y;
+    *pU = U;
+    *pV = V;
+  }
 };
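
The explicit constructors keep the uint16_t (RGB565) and uint32_t (RGB32)
overloads from being selected by accident through implicit integer
conversions. A usage sketch:

    YUVPixel pix(static_cast<uint32_t>(0x00ff0000));  // pure blue on a LE host
    uint8_t y, u, v;
    pix.get(&y, &u, &v);  // converted once at construction, fetched as needed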
 
 /* Converts a YV12 framebuffer to an RGB565 framebuffer.
@@ -311,4 +317,4 @@
 
 }; /* namespace android */
 
-#endif  /* HW_EMULATOR_CAMERA_CONVERTERS_H */
+#endif /* HW_EMULATOR_CAMERA_CONVERTERS_H */
diff --git a/guest/hals/camera/EmulatedBaseCamera.cpp b/guest/hals/camera/EmulatedBaseCamera.cpp
index 2fbbc0b..23f887e 100644
--- a/guest/hals/camera/EmulatedBaseCamera.cpp
+++ b/guest/hals/camera/EmulatedBaseCamera.cpp
@@ -32,87 +32,81 @@
 
 namespace android {
 
-EmulatedBaseCamera::EmulatedBaseCamera(int cameraId,
-        uint32_t cameraVersion,
-        struct hw_device_t* device,
-        struct hw_module_t* module)
-        : mCameraInfo(NULL),
-          mCameraID(cameraId),
-          mCameraDeviceVersion(cameraVersion)
-{
-    /*
-     * Initialize camera_device descriptor for this object.
-     */
+EmulatedBaseCamera::EmulatedBaseCamera(int cameraId, uint32_t cameraVersion,
+                                       struct hw_device_t* device,
+                                       struct hw_module_t* module)
+    : mCameraInfo(NULL),
+      mCameraID(cameraId),
+      mCameraDeviceVersion(cameraVersion) {
+  /*
+   * Initialize camera_device descriptor for this object.
+   */
 
-    /* Common header */
-    device->tag = HARDWARE_DEVICE_TAG;
-    device->version = cameraVersion;
-    device->module = module;
-    device->close = NULL; // Must be filled in by child implementation
+  /* Common header */
+  device->tag = HARDWARE_DEVICE_TAG;
+  device->version = cameraVersion;
+  device->module = module;
+  device->close = NULL;  // Must be filled in by child implementation
 }
 
-EmulatedBaseCamera::~EmulatedBaseCamera()
-{
-}
+EmulatedBaseCamera::~EmulatedBaseCamera() {}
 
-status_t EmulatedBaseCamera::getCameraInfo(struct camera_info* info)
-{
-    ALOGV("%s", __FUNCTION__);
+status_t EmulatedBaseCamera::getCameraInfo(struct camera_info* info) {
+  ALOGV("%s", __FUNCTION__);
 
-    info->device_version = mCameraDeviceVersion;
+  info->device_version = mCameraDeviceVersion;
 #if VSOC_PLATFORM_SDK_BEFORE(O)
-    // static_camera_characteristics should be initialized if and only if two
-    // conditions hold:
-    //    CAMERA_MODULE_API_VERSION_2_0 or higher
-    //    CAMERA_DEVICE_API_VERSION_2_0 or higher
-    // See android/hardware/libhardware/include/hardware/camera_common.h
-    //
-    // The CAMERA_MODULE_API_VERSION is above 2 on all of the supported
-    // branches.
-    //
-    // The CVD supports both CAMERA_DEVICE_API_VERSION_1_0 and
-    // CAMERA_DEVICE_API_VERSION_3_0.
-    //
-    // By the spec, the framework should not look at this field on
-    // CAMERA_DEVICE_API_VERSION_1_0. However, the framework
-    // referenced them unconditionally in the M, N, and N-MR1 branches.
-    // See b/67841929 for evidence.
-    //
-    // We have to support those branches, so make initialization uconditional.
-    // However, keep the 0xcafef00d fake initiziation on O and later to ensure
-    // that we'll catch future framework changes that violate the spec.
-    info->static_camera_characteristics = mCameraInfo;
+  // static_camera_characteristics should be initialized if and only if two
+  // conditions hold:
+  //    CAMERA_MODULE_API_VERSION_2_0 or higher
+  //    CAMERA_DEVICE_API_VERSION_2_0 or higher
+  // See android/hardware/libhardware/include/hardware/camera_common.h
+  //
+  // The CAMERA_MODULE_API_VERSION is above 2 on all of the supported
+  // branches.
+  //
+  // The CVD supports both CAMERA_DEVICE_API_VERSION_1_0 and
+  // CAMERA_DEVICE_API_VERSION_3_0.
+  //
+  // By the spec, the framework should not look at this field on
+  // CAMERA_DEVICE_API_VERSION_1_0. However, the framework
+  // referenced it unconditionally in the M, N, and N-MR1 branches.
+  // See b/67841929 for evidence.
+  //
+  // We have to support those branches, so make initialization unconditional.
+  // However, keep the 0xcafef00d fake initialization on O and later to ensure
+  // that we'll catch future framework changes that violate the spec.
+  info->static_camera_characteristics = mCameraInfo;
 #else
-    if (mCameraDeviceVersion >= HARDWARE_DEVICE_API_VERSION(2, 0)) {
-        info->static_camera_characteristics = mCameraInfo;
-    } else {
-        info->static_camera_characteristics = (camera_metadata_t*)0xcafef00d;
-    }
+  if (mCameraDeviceVersion >= HARDWARE_DEVICE_API_VERSION(2, 0)) {
+    info->static_camera_characteristics = mCameraInfo;
+  } else {
+    info->static_camera_characteristics = (camera_metadata_t*)0xcafef00d;
+  }
 #endif
 
-    return NO_ERROR;
+  return NO_ERROR;
 }
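
For context, a hypothetical caller that honors the spec would mirror the
post-O check above before touching the characteristics pointer (a sketch;
'cam' stands in for any EmulatedBaseCamera instance):

    camera_info info;
    if (cam->getCameraInfo(&info) == NO_ERROR &&
        info.device_version >= HARDWARE_DEVICE_API_VERSION(2, 0)) {
      /* Only now is static_camera_characteristics guaranteed valid. */
      const camera_metadata_t* chars = info.static_camera_characteristics;
    }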
 
 status_t EmulatedBaseCamera::plugCamera() {
-    ALOGE("%s: not supported", __FUNCTION__);
-    return INVALID_OPERATION;
+  ALOGE("%s: not supported", __FUNCTION__);
+  return INVALID_OPERATION;
 }
 
 status_t EmulatedBaseCamera::unplugCamera() {
-    ALOGE("%s: not supported", __FUNCTION__);
-    return INVALID_OPERATION;
+  ALOGE("%s: not supported", __FUNCTION__);
+  return INVALID_OPERATION;
 }
 
 #if VSOC_PLATFORM_SDK_AFTER(J_MR2)
 camera_device_status_t EmulatedBaseCamera::getHotplugStatus() {
-    return CAMERA_DEVICE_STATUS_PRESENT;
+  return CAMERA_DEVICE_STATUS_PRESENT;
 }
 #endif
 
 status_t EmulatedBaseCamera::setTorchMode(bool /* enabled */) {
-    ALOGE("%s: not supported", __FUNCTION__);
-    return INVALID_OPERATION;
+  ALOGE("%s: not supported", __FUNCTION__);
+  return INVALID_OPERATION;
 }
 
-
 } /* namespace android */
diff --git a/guest/hals/camera/EmulatedBaseCamera.h b/guest/hals/camera/EmulatedBaseCamera.h
index ab64b45..a59ae23 100644
--- a/guest/hals/camera/EmulatedBaseCamera.h
+++ b/guest/hals/camera/EmulatedBaseCamera.h
@@ -19,9 +19,9 @@
 
 #include <hardware/camera_common.h>
 #include <utils/Errors.h>
-#include "guest/libs/platform_support/api_level_fixes.h"
 #include "CameraConfiguration.h"
 #include "ImageMetadata.h"
+#include "guest/libs/platform_support/api_level_fixes.h"
 
 namespace android {
 
@@ -36,96 +36,92 @@
  */
 
 class EmulatedBaseCamera {
-  public:
-    EmulatedBaseCamera(int cameraId,
-            uint32_t cameraVersion,
-            struct hw_device_t* device,
-            struct hw_module_t* module);
+ public:
+  EmulatedBaseCamera(int cameraId, uint32_t cameraVersion,
+                     struct hw_device_t* device, struct hw_module_t* module);
 
-    virtual ~EmulatedBaseCamera();
+  virtual ~EmulatedBaseCamera();
 
-    /****************************************************************************
-     * Public API
-     ***************************************************************************/
+  /****************************************************************************
+   * Public API
+   ***************************************************************************/
 
-  public:
-    /* Initializes EmulatedCamera instance.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status on failure.
-     */
-    virtual status_t Initialize(const cvd::CameraDefinition& params) = 0;
+ public:
+  /* Initializes EmulatedCamera instance.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status on failure.
+   */
+  virtual status_t Initialize(const cvd::CameraDefinition& params) = 0;
 
-    /****************************************************************************
-     * Camera API implementation
-     ***************************************************************************/
+  /****************************************************************************
+   * Camera API implementation
+   ***************************************************************************/
 
-  public:
-    /* Creates connection to the emulated camera device.
-     * This method is called in response to hw_module_methods_t::open callback.
-     * NOTE: When this method is called the object is locked.
-     * Note that failures in this method are reported as negative EXXX statuses.
-     */
-    virtual status_t connectCamera(hw_device_t** device) = 0;
+ public:
+  /* Creates connection to the emulated camera device.
+   * This method is called in response to hw_module_methods_t::open callback.
+   * NOTE: When this method is called the object is locked.
+   * Note that failures in this method are reported as negative EXXX statuses.
+   */
+  virtual status_t connectCamera(hw_device_t** device) = 0;
 
+  /* Plug the connection for the emulated camera. Until it's plugged in,
+   * calls to connectCamera should fail with -ENODEV.
+   */
+  virtual status_t plugCamera();
 
-    /* Plug the connection for the emulated camera. Until it's plugged in
-     * calls to connectCamera should fail with -ENODEV.
-     */
-    virtual status_t plugCamera();
-
-    /* Unplug the connection from underneath the emulated camera.
-     * This is similar to closing the camera, except that
-     * all function calls into the camera device will return
-     * -EPIPE errors until the camera is reopened.
-     */
-    virtual status_t unplugCamera();
+  /* Unplug the connection from underneath the emulated camera.
+   * This is similar to closing the camera, except that
+   * all function calls into the camera device will return
+   * -EPIPE errors until the camera is reopened.
+   */
+  virtual status_t unplugCamera();
 
 #if VSOC_PLATFORM_SDK_AFTER(J_MR2)
-    virtual camera_device_status_t getHotplugStatus();
+  virtual camera_device_status_t getHotplugStatus();
 #endif
 
-    /* Closes connection to the emulated camera.
-     * This method is called in response to camera_device::close callback.
-     * NOTE: When this method is called the object is locked.
-     * Note that failures in this method are reported as negative EXXX statuses.
-     */
-    virtual status_t closeCamera() = 0;
+  /* Closes connection to the emulated camera.
+   * This method is called in response to camera_device::close callback.
+   * NOTE: When this method is called the object is locked.
+   * Note that failures in this method are reported as negative EXXX statuses.
+   */
+  virtual status_t closeCamera() = 0;
 
-    /* Gets camera information.
-     * This method is called in response to camera_module_t::get_camera_info
-     * callback.
-     * NOTE: When this method is called the object is locked.
-     * Note that failures in this method are reported as negative EXXX statuses.
-     */
-    virtual status_t getCameraInfo(struct camera_info* info) = 0;
+  /* Gets camera information.
+   * This method is called in response to camera_module_t::get_camera_info
+   * callback.
+   * NOTE: When this method is called the object is locked.
+   * Note that failures in this method are reported as negative EXXX statuses.
+   */
+  virtual status_t getCameraInfo(struct camera_info* info) = 0;
 
-    /* Gets image metadata.
-     * This method is called to collect metadata for (currently) taken picture.
-     */
-    virtual status_t getImageMetadata(struct ImageMetadata* meta) = 0;
+  /* Gets image metadata.
+   * This method is called to collect metadata for the picture being taken.
+   */
+  virtual status_t getImageMetadata(struct ImageMetadata* meta) = 0;
 
-    /* Set torch mode.
-     * This method is called in response to camera_module_t::set_torch_mode
-     * callback.
-     */
-    virtual status_t setTorchMode(bool enabled);
+  /* Set torch mode.
+   * This method is called in response to camera_module_t::set_torch_mode
+   * callback.
+   */
+  virtual status_t setTorchMode(bool enabled);
 
-    /****************************************************************************
-     * Data members
-     ***************************************************************************/
+  /****************************************************************************
+   * Data members
+   ***************************************************************************/
 
-  protected:
-    /* Fixed camera information for camera2 devices. Must be valid to access if
-     * mCameraDeviceVersion is >= HARDWARE_DEVICE_API_VERSION(2,0)  */
-    camera_metadata_t *mCameraInfo;
+ protected:
+  /* Fixed camera information for camera2 devices. Must be valid to access if
+   * mCameraDeviceVersion is >= HARDWARE_DEVICE_API_VERSION(2,0)  */
+  camera_metadata_t* mCameraInfo;
 
-    /* Zero-based ID assigned to this camera. */
-    int mCameraID;
+  /* Zero-based ID assigned to this camera. */
+  int mCameraID;
 
-  private:
-
-    /* Version of the camera device HAL implemented by this camera */
-    int mCameraDeviceVersion;
+ private:
+  /* Version of the camera device HAL implemented by this camera */
+  int mCameraDeviceVersion;
 };
 
 } /* namespace android */
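
To make the contract concrete: a minimal hypothetical subclass (not part
of this change) only has to provide the pure virtuals; plugCamera,
unplugCamera, getHotplugStatus, and setTorchMode all have stub defaults in
the base class:

    class NullCamera : public EmulatedBaseCamera {
     public:
      NullCamera(int id, struct hw_device_t* dev, struct hw_module_t* mod)
          : EmulatedBaseCamera(id, HARDWARE_DEVICE_API_VERSION(1, 0), dev,
                               mod) {}
      status_t Initialize(const cvd::CameraDefinition&) { return NO_ERROR; }
      status_t connectCamera(hw_device_t** device) { return -ENODEV; }
      status_t closeCamera() { return NO_ERROR; }
      status_t getCameraInfo(struct camera_info* info) {
        return EmulatedBaseCamera::getCameraInfo(info);
      }
      status_t getImageMetadata(struct ImageMetadata* meta) {
        return NO_ERROR;
      }
    };
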
diff --git a/guest/hals/camera/EmulatedCamera.cpp b/guest/hals/camera/EmulatedCamera.cpp
index dd0a4cb..a6216a2 100644
--- a/guest/hals/camera/EmulatedCamera.cpp
+++ b/guest/hals/camera/EmulatedCamera.cpp
@@ -38,13 +38,9 @@
 namespace android {
 namespace {
 const char* kSupportedFlashModes[] = {
-  CameraParameters::FLASH_MODE_OFF,
-  CameraParameters::FLASH_MODE_AUTO,
-  CameraParameters::FLASH_MODE_ON,
-  CameraParameters::FLASH_MODE_RED_EYE,
-  CameraParameters::FLASH_MODE_TORCH,
-  NULL
-};
+    CameraParameters::FLASH_MODE_OFF,   CameraParameters::FLASH_MODE_AUTO,
+    CameraParameters::FLASH_MODE_ON,    CameraParameters::FLASH_MODE_RED_EYE,
+    CameraParameters::FLASH_MODE_TORCH, NULL};
 
 std::string BuildParameterValue(const char** value_array) {
   std::string result;
@@ -71,732 +67,683 @@
  *  current - Current set of camera parameters.
  *  new_par - String representation of new parameters.
  */
-static void PrintParamDiff(const CameraParameters& current, const char* new_par);
+static void PrintParamDiff(const CameraParameters& current,
+                           const char* new_par);
 #else
-#define PrintParamDiff(current, new_par)   (void(0))
-#endif  /* DEBUG_PARAM */
+#define PrintParamDiff(current, new_par) (void(0))
+#endif /* DEBUG_PARAM */
 
 /* A helper routine that adds a value to the camera parameter.
  * Param:
  *  param - Camera parameter to add a value to.
  *  val - Value to add.
  * Return:
- *  A new string containing parameter with the added value on success, or NULL on
- *  a failure. If non-NULL string is returned, the caller is responsible for
+ *  A new string containing the parameter with the added value on success, or
+ *  NULL on failure. If a non-NULL string is returned, the caller is responsible for
  *  freeing it with 'free'.
  */
 static char* AddValue(const char* param, const char* val);
 
-EmulatedCamera::EmulatedCamera(int cameraId,
-                               struct hw_module_t* module)
-        : EmulatedBaseCamera(cameraId,
-                HARDWARE_DEVICE_API_VERSION(1, 0),
-                &common,
-                module),
-          mPreviewWindow(),
-          mCallbackNotifier()
-{
-    /* camera_device v1 fields. */
-    common.close = EmulatedCamera::close;
-    ops = &mDeviceOps;
-    priv = this;
+EmulatedCamera::EmulatedCamera(int cameraId, struct hw_module_t* module)
+    : EmulatedBaseCamera(cameraId, HARDWARE_DEVICE_API_VERSION(1, 0), &common,
+                         module),
+      mPreviewWindow(),
+      mCallbackNotifier() {
+  /* camera_device v1 fields. */
+  common.close = EmulatedCamera::close;
+  ops = &mDeviceOps;
+  priv = this;
 }
 
-EmulatedCamera::~EmulatedCamera()
-{
-}
+EmulatedCamera::~EmulatedCamera() {}
 
 /****************************************************************************
  * Public API
  ***************************************************************************/
 
-status_t EmulatedCamera::Initialize(const cvd::CameraDefinition&)
-{
-    /* Preview formats supported by this HAL. */
-    char preview_formats[1024];
-    snprintf(preview_formats, sizeof(preview_formats), "%s,%s,%s",
-             CameraParameters::PIXEL_FORMAT_YUV420SP,
-             CameraParameters::PIXEL_FORMAT_YUV420P,
-             CameraParameters::PIXEL_FORMAT_RGBA8888);
+status_t EmulatedCamera::Initialize(const cvd::CameraDefinition&) {
+  /* Preview formats supported by this HAL. */
+  char preview_formats[1024];
+  snprintf(preview_formats, sizeof(preview_formats), "%s,%s,%s",
+           CameraParameters::PIXEL_FORMAT_YUV420SP,
+           CameraParameters::PIXEL_FORMAT_YUV420P,
+           CameraParameters::PIXEL_FORMAT_RGBA8888);
 
-    /*
-     * Fake required parameters.
-     */
+  /*
+   * Fake required parameters.
+   */
 
-    mParameters.set(CameraParameters::KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES, "320x240,0x0");
+  mParameters.set(CameraParameters::KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES,
+                  "320x240,0x0");
 
-    mParameters.set(CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH, "512");
-    mParameters.set(CameraParameters::KEY_JPEG_THUMBNAIL_HEIGHT, "384");
-    mParameters.set(CameraParameters::KEY_JPEG_QUALITY, "90");
-    mParameters.set(CameraParameters::KEY_FOCAL_LENGTH, "4.31");
-    mParameters.set(CameraParameters::KEY_HORIZONTAL_VIEW_ANGLE, "54.8");
-    mParameters.set(CameraParameters::KEY_VERTICAL_VIEW_ANGLE, "42.5");
-    mParameters.set(CameraParameters::KEY_JPEG_THUMBNAIL_QUALITY, "90");
+  mParameters.set(CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH, "512");
+  mParameters.set(CameraParameters::KEY_JPEG_THUMBNAIL_HEIGHT, "384");
+  mParameters.set(CameraParameters::KEY_JPEG_QUALITY, "90");
+  mParameters.set(CameraParameters::KEY_FOCAL_LENGTH, "4.31");
+  mParameters.set(CameraParameters::KEY_HORIZONTAL_VIEW_ANGLE, "54.8");
+  mParameters.set(CameraParameters::KEY_VERTICAL_VIEW_ANGLE, "42.5");
+  mParameters.set(CameraParameters::KEY_JPEG_THUMBNAIL_QUALITY, "90");
 
-    /* Preview format settings used here are related to panoramic view only. It's
-     * not related to the preview window that works only with RGB frames, which
-     * is explicitly stated when set_buffers_geometry is called on the preview
-     * window object. */
-    mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS,
-                    preview_formats);
-    mParameters.setPreviewFormat(CameraParameters::PIXEL_FORMAT_YUV420SP);
+  /* Preview format settings used here relate to the panoramic view only.
+   * They do not apply to the preview window, which works only with RGB
+   * frames, as is explicitly stated when set_buffers_geometry is called on
+   * the preview window object. */
+  mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FORMATS,
+                  preview_formats);
+  mParameters.setPreviewFormat(CameraParameters::PIXEL_FORMAT_YUV420SP);
 
-    /* We don't relay on the actual frame rates supported by the camera device,
-     * since we will emulate them through timeouts in the emulated camera device
-     * worker thread. */
-    mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES,
-                    "30,24,20,15,10,5");
-    mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FPS_RANGE,
-                    "(5000,30000),(15000,15000),(30000,30000)");
-    mParameters.set(CameraParameters::KEY_PREVIEW_FPS_RANGE,
-                    "5000,30000");
-    mParameters.setPreviewFrameRate(30000);
+  /* We don't rely on the actual frame rates supported by the camera device,
+   * since we will emulate them through timeouts in the emulated camera device
+   * worker thread. */
+  mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FRAME_RATES,
+                  "30,24,20,15,10,5");
+  mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_FPS_RANGE,
+                  "(5000,30000),(15000,15000),(30000,30000)");
+  mParameters.set(CameraParameters::KEY_PREVIEW_FPS_RANGE, "5000,30000");
+  mParameters.setPreviewFrameRate(30000);
 
-    /* Only PIXEL_FORMAT_YUV420P is accepted by video framework in emulator! */
-    mParameters.set(CameraParameters::KEY_VIDEO_FRAME_FORMAT,
-                    CameraParameters::PIXEL_FORMAT_YUV420P);
-    mParameters.set(CameraParameters::KEY_SUPPORTED_PICTURE_FORMATS,
-                    CameraParameters::PIXEL_FORMAT_JPEG);
-    mParameters.setPictureFormat(CameraParameters::PIXEL_FORMAT_JPEG);
+  /* Only PIXEL_FORMAT_YUV420P is accepted by the video framework in the
+   * emulator! */
+  mParameters.set(CameraParameters::KEY_VIDEO_FRAME_FORMAT,
+                  CameraParameters::PIXEL_FORMAT_YUV420P);
+  mParameters.set(CameraParameters::KEY_SUPPORTED_PICTURE_FORMATS,
+                  CameraParameters::PIXEL_FORMAT_JPEG);
+  mParameters.setPictureFormat(CameraParameters::PIXEL_FORMAT_JPEG);
 
-    /* Set exposure compensation. */
-    mParameters.set(CameraParameters::KEY_MAX_EXPOSURE_COMPENSATION, "6");
-    mParameters.set(CameraParameters::KEY_MIN_EXPOSURE_COMPENSATION, "-6");
-    mParameters.set(CameraParameters::KEY_EXPOSURE_COMPENSATION_STEP, "0.5");
-    mParameters.set(CameraParameters::KEY_EXPOSURE_COMPENSATION, "0");
+  /* Set exposure compensation. */
+  mParameters.set(CameraParameters::KEY_MAX_EXPOSURE_COMPENSATION, "6");
+  mParameters.set(CameraParameters::KEY_MIN_EXPOSURE_COMPENSATION, "-6");
+  mParameters.set(CameraParameters::KEY_EXPOSURE_COMPENSATION_STEP, "0.5");
+  mParameters.set(CameraParameters::KEY_EXPOSURE_COMPENSATION, "0");
 
-    /* Sets the white balance modes and the device-dependent scale factors. */
-    char supported_white_balance[1024];
-    snprintf(supported_white_balance, sizeof(supported_white_balance),
-             "%s,%s,%s,%s",
-             CameraParameters::WHITE_BALANCE_AUTO,
-             CameraParameters::WHITE_BALANCE_INCANDESCENT,
-             CameraParameters::WHITE_BALANCE_DAYLIGHT,
-             CameraParameters::WHITE_BALANCE_TWILIGHT);
-    mParameters.set(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE,
-                    supported_white_balance);
-    mParameters.set(CameraParameters::KEY_WHITE_BALANCE,
-                    CameraParameters::WHITE_BALANCE_AUTO);
-    getCameraDevice()->initializeWhiteBalanceModes(
-            CameraParameters::WHITE_BALANCE_AUTO, 1.0f, 1.0f);
-    getCameraDevice()->initializeWhiteBalanceModes(
-            CameraParameters::WHITE_BALANCE_INCANDESCENT, 1.38f, 0.60f);
-    getCameraDevice()->initializeWhiteBalanceModes(
-            CameraParameters::WHITE_BALANCE_DAYLIGHT, 1.09f, 0.92f);
-    getCameraDevice()->initializeWhiteBalanceModes(
-            CameraParameters::WHITE_BALANCE_TWILIGHT, 0.92f, 1.22f);
-    getCameraDevice()->setWhiteBalanceMode(CameraParameters::WHITE_BALANCE_AUTO);
+  /* Sets the white balance modes and the device-dependent scale factors. */
+  char supported_white_balance[1024];
+  snprintf(supported_white_balance, sizeof(supported_white_balance),
+           "%s,%s,%s,%s", CameraParameters::WHITE_BALANCE_AUTO,
+           CameraParameters::WHITE_BALANCE_INCANDESCENT,
+           CameraParameters::WHITE_BALANCE_DAYLIGHT,
+           CameraParameters::WHITE_BALANCE_TWILIGHT);
+  mParameters.set(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE,
+                  supported_white_balance);
+  mParameters.set(CameraParameters::KEY_WHITE_BALANCE,
+                  CameraParameters::WHITE_BALANCE_AUTO);
+  getCameraDevice()->initializeWhiteBalanceModes(
+      CameraParameters::WHITE_BALANCE_AUTO, 1.0f, 1.0f);
+  getCameraDevice()->initializeWhiteBalanceModes(
+      CameraParameters::WHITE_BALANCE_INCANDESCENT, 1.38f, 0.60f);
+  getCameraDevice()->initializeWhiteBalanceModes(
+      CameraParameters::WHITE_BALANCE_DAYLIGHT, 1.09f, 0.92f);
+  getCameraDevice()->initializeWhiteBalanceModes(
+      CameraParameters::WHITE_BALANCE_TWILIGHT, 0.92f, 1.22f);
+  getCameraDevice()->setWhiteBalanceMode(CameraParameters::WHITE_BALANCE_AUTO);
 
-    /*
-     * Not supported features
-     */
-    mParameters.set(CameraParameters::KEY_SUPPORTED_FOCUS_MODES,
-                    CameraParameters::FOCUS_MODE_FIXED);
-    mParameters.set(CameraParameters::KEY_FOCUS_MODE,
-                    CameraParameters::FOCUS_MODE_FIXED);
-    mParameters.set(CameraParameters::KEY_SUPPORTED_FLASH_MODES,
-                    BuildParameterValue(kSupportedFlashModes).c_str());
-    mParameters.set(CameraParameters::KEY_FLASH_MODE,
-                    CameraParameters::FLASH_MODE_OFF);
-    mParameters.set(CameraParameters::KEY_FOCUS_DISTANCES, "0.1,0.1,0.1");
-    mParameters.set(CameraParameters::KEY_MAX_NUM_DETECTED_FACES_HW, "0");
-    mParameters.set(CameraParameters::KEY_MAX_NUM_DETECTED_FACES_SW, "0");
-    mParameters.set(CameraParameters::KEY_ZOOM_RATIOS, "100");
-    mParameters.set(CameraParameters::KEY_ZOOM_SUPPORTED,
-                    CameraParameters::FALSE);
-    mParameters.set(CameraParameters::KEY_SMOOTH_ZOOM_SUPPORTED,
-                    CameraParameters::FALSE);
-    mParameters.set(CameraParameters::KEY_ZOOM, "0");
-    mParameters.set(CameraParameters::KEY_MAX_ZOOM, "0");
+  /*
+   * Not supported features
+   */
+  mParameters.set(CameraParameters::KEY_SUPPORTED_FOCUS_MODES,
+                  CameraParameters::FOCUS_MODE_FIXED);
+  mParameters.set(CameraParameters::KEY_FOCUS_MODE,
+                  CameraParameters::FOCUS_MODE_FIXED);
+  mParameters.set(CameraParameters::KEY_SUPPORTED_FLASH_MODES,
+                  BuildParameterValue(kSupportedFlashModes).c_str());
+  mParameters.set(CameraParameters::KEY_FLASH_MODE,
+                  CameraParameters::FLASH_MODE_OFF);
+  mParameters.set(CameraParameters::KEY_FOCUS_DISTANCES, "0.1,0.1,0.1");
+  mParameters.set(CameraParameters::KEY_MAX_NUM_DETECTED_FACES_HW, "0");
+  mParameters.set(CameraParameters::KEY_MAX_NUM_DETECTED_FACES_SW, "0");
+  mParameters.set(CameraParameters::KEY_ZOOM_RATIOS, "100");
+  mParameters.set(CameraParameters::KEY_ZOOM_SUPPORTED,
+                  CameraParameters::FALSE);
+  mParameters.set(CameraParameters::KEY_SMOOTH_ZOOM_SUPPORTED,
+                  CameraParameters::FALSE);
+  mParameters.set(CameraParameters::KEY_ZOOM, "0");
+  mParameters.set(CameraParameters::KEY_MAX_ZOOM, "0");
 
-    return NO_ERROR;
+  return NO_ERROR;
 }
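
BuildParameterValue's body is elided by the hunk above; from its use with
kSupportedFlashModes it evidently joins a NULL-terminated value array into
the comma-separated list the parameter API expects. A sketch of that
assumed behavior:

    std::string BuildParameterValueSketch(const char** value_array) {
      std::string result;
      for (size_t i = 0; value_array[i] != NULL; ++i) {
        if (i > 0) result += ",";
        result += value_array[i];
      }
      return result;  /* e.g. "off,auto,on,red-eye,torch" for the flash modes */
    }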
 
-void EmulatedCamera::onNextFrameAvailable(const void* frame,
-                                          nsecs_t timestamp,
-                                          EmulatedCameraDevice* camera_dev)
-{
-    /* Notify the preview window first. */
-    mPreviewWindow.onNextFrameAvailable(frame, timestamp, camera_dev);
+void EmulatedCamera::onNextFrameAvailable(const void* frame, nsecs_t timestamp,
+                                          EmulatedCameraDevice* camera_dev) {
+  /* Notify the preview window first. */
+  mPreviewWindow.onNextFrameAvailable(frame, timestamp, camera_dev);
 
-    /* Notify callback notifier next. */
-    mCallbackNotifier.onNextFrameAvailable(frame, timestamp, camera_dev);
+  /* Notify callback notifier next. */
+  mCallbackNotifier.onNextFrameAvailable(frame, timestamp, camera_dev);
 }
 
-void EmulatedCamera::onCameraDeviceError(int err)
-{
-    /* Errors are reported through the callback notifier */
-    mCallbackNotifier.onCameraDeviceError(err);
+void EmulatedCamera::onCameraDeviceError(int err) {
+  /* Errors are reported through the callback notifier */
+  mCallbackNotifier.onCameraDeviceError(err);
 }
 
 void EmulatedCamera::onCameraFocusAcquired() {
-    mCallbackNotifier.onCameraFocusAcquired();
+  mCallbackNotifier.onCameraFocusAcquired();
 }
 
 /****************************************************************************
  * Camera API implementation.
  ***************************************************************************/
 
-status_t EmulatedCamera::connectCamera(hw_device_t** device)
-{
-    ALOGV("%s", __FUNCTION__);
+status_t EmulatedCamera::connectCamera(hw_device_t** device) {
+  ALOGV("%s", __FUNCTION__);
 
-    status_t res = EINVAL;
-    EmulatedCameraDevice* const camera_dev = getCameraDevice();
-    ALOGE_IF(camera_dev == NULL, "%s: No camera device instance.", __FUNCTION__);
+  status_t res = EINVAL;
+  EmulatedCameraDevice* const camera_dev = getCameraDevice();
+  ALOGE_IF(camera_dev == NULL, "%s: No camera device instance.", __FUNCTION__);
 
-    if (camera_dev != NULL) {
-        /* Connect to the camera device. */
-        res = getCameraDevice()->connectDevice();
-        if (res == NO_ERROR) {
-            *device = &common;
-        }
+  if (camera_dev != NULL) {
+    /* Connect to the camera device. */
+    res = getCameraDevice()->connectDevice();
+    if (res == NO_ERROR) {
+      *device = &common;
     }
+  }
 
-    return -res;
+  return -res;
 }
 
-status_t EmulatedCamera::closeCamera()
-{
-    ALOGV("%s", __FUNCTION__);
+status_t EmulatedCamera::closeCamera() {
+  ALOGV("%s", __FUNCTION__);
 
-    return cleanupCamera();
+  return cleanupCamera();
 }
 
-status_t EmulatedCamera::getCameraInfo(struct camera_info* info)
-{
-    ALOGV("%s", __FUNCTION__);
+status_t EmulatedCamera::getCameraInfo(struct camera_info* info) {
+  ALOGV("%s", __FUNCTION__);
 
-    const char* valstr = NULL;
+  const char* valstr = NULL;
 
-    valstr = mParameters.get(EmulatedCamera::FACING_KEY);
-    if (valstr != NULL) {
-        if (strcmp(valstr, EmulatedCamera::FACING_FRONT) == 0) {
-            info->facing = CAMERA_FACING_FRONT;
-        }
-        else if (strcmp(valstr, EmulatedCamera::FACING_BACK) == 0) {
-            info->facing = CAMERA_FACING_BACK;
-        }
-    } else {
-        info->facing = CAMERA_FACING_BACK;
+  valstr = mParameters.get(EmulatedCamera::FACING_KEY);
+  if (valstr != NULL) {
+    if (strcmp(valstr, EmulatedCamera::FACING_FRONT) == 0) {
+      info->facing = CAMERA_FACING_FRONT;
+    } else if (strcmp(valstr, EmulatedCamera::FACING_BACK) == 0) {
+      info->facing = CAMERA_FACING_BACK;
     }
+  } else {
+    info->facing = CAMERA_FACING_BACK;
+  }
 
-    valstr = mParameters.get(EmulatedCamera::ORIENTATION_KEY);
-    if (valstr != NULL) {
-        info->orientation = atoi(valstr);
-    } else {
-        info->orientation = 0;
-    }
+  valstr = mParameters.get(EmulatedCamera::ORIENTATION_KEY);
+  if (valstr != NULL) {
+    info->orientation = atoi(valstr);
+  } else {
+    info->orientation = 0;
+  }
 
 #if VSOC_PLATFORM_SDK_AFTER(L_MR1)
-    info->resource_cost = 100;
-    info->conflicting_devices = NULL;
-    info->conflicting_devices_length = 0;
+  info->resource_cost = 100;
+  info->conflicting_devices = NULL;
+  info->conflicting_devices_length = 0;
 #endif
 
-    return EmulatedBaseCamera::getCameraInfo(info);
+  return EmulatedBaseCamera::getCameraInfo(info);
 }
 
 status_t EmulatedCamera::getImageMetadata(struct ImageMetadata* meta) {
-    meta->mLensFocalLength =
-        mParameters.getFloat(CameraParameters::KEY_FOCAL_LENGTH);
-    meta->mGpsLatitude =
-        mParameters.getFloat(CameraParameters::KEY_GPS_LATITUDE);
-    meta->mGpsLongitude =
-        mParameters.getFloat(CameraParameters::KEY_GPS_LONGITUDE);
-    meta->mGpsAltitude =
-        mParameters.getFloat(CameraParameters::KEY_GPS_ALTITUDE);
-    meta->mGpsTimestamp =
-        mParameters.getInt(CameraParameters::KEY_GPS_TIMESTAMP);
-    meta->mThumbnailWidth =
-        mParameters.getInt(CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH);
-    meta->mThumbnailHeight =
-        mParameters.getInt(CameraParameters::KEY_JPEG_THUMBNAIL_HEIGHT);
-    const char* temp =
-        mParameters.get(CameraParameters::KEY_GPS_PROCESSING_METHOD);
-    if (temp) {
-      meta->mGpsProcessingMethod = temp;
-    }
-    return NO_ERROR;
+  meta->mLensFocalLength =
+      mParameters.getFloat(CameraParameters::KEY_FOCAL_LENGTH);
+  meta->mGpsLatitude = mParameters.getFloat(CameraParameters::KEY_GPS_LATITUDE);
+  meta->mGpsLongitude =
+      mParameters.getFloat(CameraParameters::KEY_GPS_LONGITUDE);
+  meta->mGpsAltitude = mParameters.getFloat(CameraParameters::KEY_GPS_ALTITUDE);
+  meta->mGpsTimestamp = mParameters.getInt(CameraParameters::KEY_GPS_TIMESTAMP);
+  meta->mThumbnailWidth =
+      mParameters.getInt(CameraParameters::KEY_JPEG_THUMBNAIL_WIDTH);
+  meta->mThumbnailHeight =
+      mParameters.getInt(CameraParameters::KEY_JPEG_THUMBNAIL_HEIGHT);
+  const char* temp =
+      mParameters.get(CameraParameters::KEY_GPS_PROCESSING_METHOD);
+  if (temp) {
+    meta->mGpsProcessingMethod = temp;
+  }
+  return NO_ERROR;
 }
 
-status_t EmulatedCamera::setPreviewWindow(struct preview_stream_ops* window)
-{
-    /* Callback should return a negative errno. */
-    return -mPreviewWindow.setPreviewWindow(window,
-                                             mParameters.getPreviewFrameRate());
+status_t EmulatedCamera::setPreviewWindow(struct preview_stream_ops* window) {
+  /* Callback should return a negative errno. */
+  return -mPreviewWindow.setPreviewWindow(window,
+                                          mParameters.getPreviewFrameRate());
 }
 
-void EmulatedCamera::setCallbacks(camera_notify_callback notify_cb,
-                                  camera_data_callback data_cb,
-                                  camera_data_timestamp_callback data_cb_timestamp,
-                                  camera_request_memory get_memory,
-                                  void* user)
-{
-    mCallbackNotifier.setCallbacks(notify_cb, data_cb, data_cb_timestamp,
-                                    get_memory, user);
+void EmulatedCamera::setCallbacks(
+    camera_notify_callback notify_cb, camera_data_callback data_cb,
+    camera_data_timestamp_callback data_cb_timestamp,
+    camera_request_memory get_memory, void* user) {
+  mCallbackNotifier.setCallbacks(notify_cb, data_cb, data_cb_timestamp,
+                                 get_memory, user);
 }
 
-void EmulatedCamera::enableMsgType(int32_t msg_type)
-{
-    mCallbackNotifier.enableMessage(msg_type);
+void EmulatedCamera::enableMsgType(int32_t msg_type) {
+  mCallbackNotifier.enableMessage(msg_type);
 }
 
-void EmulatedCamera::disableMsgType(int32_t msg_type)
-{
-    mCallbackNotifier.disableMessage(msg_type);
+void EmulatedCamera::disableMsgType(int32_t msg_type) {
+  mCallbackNotifier.disableMessage(msg_type);
 }
 
-int EmulatedCamera::isMsgTypeEnabled(int32_t msg_type)
-{
-    return mCallbackNotifier.isMessageEnabled(msg_type);
+int EmulatedCamera::isMsgTypeEnabled(int32_t msg_type) {
+  return mCallbackNotifier.isMessageEnabled(msg_type);
 }
 
-status_t EmulatedCamera::startPreview()
-{
-    /* Callback should return a negative errno. */
-    return -doStartPreview();
+status_t EmulatedCamera::startPreview() {
+  /* Callback should return a negative errno. */
+  return -doStartPreview();
 }
 
-void EmulatedCamera::stopPreview()
-{
+void EmulatedCamera::stopPreview() { doStopPreview(); }
+
+int EmulatedCamera::isPreviewEnabled() {
+  return mPreviewWindow.isPreviewEnabled();
+}
+
+status_t EmulatedCamera::storeMetaDataInBuffers(int enable) {
+  /* Callback should return a negative errno. */
+  return -mCallbackNotifier.storeMetaDataInBuffers(enable);
+}
+
+status_t EmulatedCamera::startRecording() {
+  /* Callback should return a negative errno. */
+  return -mCallbackNotifier.enableVideoRecording(
+      mParameters.getPreviewFrameRate());
+}
+
+void EmulatedCamera::stopRecording() {
+  mCallbackNotifier.disableVideoRecording();
+}
+
+int EmulatedCamera::isRecordingEnabled() {
+  return mCallbackNotifier.isVideoRecordingEnabled();
+}
+
+void EmulatedCamera::releaseRecordingFrame(const void* opaque) {
+  mCallbackNotifier.releaseRecordingFrame(opaque);
+}
+
+status_t EmulatedCamera::setAutoFocus() {
+  ALOGV("%s", __FUNCTION__);
+
+  /* Trigger auto-focus. Focus response cannot be sent directly from here. */
+  getCameraDevice()->startAutoFocus();
+
+  /* TODO: Future enhancements. */
+  return NO_ERROR;
+}
+
+status_t EmulatedCamera::cancelAutoFocus() {
+  ALOGV("%s", __FUNCTION__);
+
+  /* TODO: Future enhancements. */
+  return NO_ERROR;
+}
+
+status_t EmulatedCamera::takePicture() {
+  ALOGV("%s", __FUNCTION__);
+
+  status_t res;
+  int width, height;
+  uint32_t org_fmt;
+
+  /* Collect frame info for the picture. */
+  mParameters.getPictureSize(&width, &height);
+  const char* pix_fmt = mParameters.getPictureFormat();
+  if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_YUV420P) == 0) {
+    org_fmt = V4L2_PIX_FMT_YUV420;
+  } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_RGBA8888) == 0) {
+    org_fmt = V4L2_PIX_FMT_RGB32;
+  } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_YUV420SP) == 0) {
+    org_fmt = V4L2_PIX_FMT_NV21;
+  } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_JPEG) == 0) {
+    /* We only support JPEG conversion from the NV21 format. */
+    org_fmt = V4L2_PIX_FMT_NV21;
+  } else {
+    ALOGE("%s: Unsupported pixel format %s", __FUNCTION__, pix_fmt);
+    return EINVAL;
+  }
+  /* Get JPEG quality. */
+  int jpeg_quality = mParameters.getInt(CameraParameters::KEY_JPEG_QUALITY);
+  if (jpeg_quality <= 0) {
+    jpeg_quality = 90; /* Fall back to default. */
+  }
+
+  /*
+   * Make sure preview is not running and the device is stopped before taking
+   * a picture.
+   */
+
+  const bool preview_on = mPreviewWindow.isPreviewEnabled();
+  if (preview_on) {
     doStopPreview();
-}
+  }
 
-int EmulatedCamera::isPreviewEnabled()
-{
-    return mPreviewWindow.isPreviewEnabled();
-}
+  /* The camera device should already have been stopped when the shutter
+   * message was enabled. */
+  EmulatedCameraDevice* const camera_dev = getCameraDevice();
+  if (camera_dev->isStarted()) {
+    ALOGW("%s: Camera device is started", __FUNCTION__);
+    camera_dev->stopDeliveringFrames();
+    camera_dev->stopDevice();
+  }
 
-status_t EmulatedCamera::storeMetaDataInBuffers(int enable)
-{
-    /* Callback should return a negative errno. */
-    return -mCallbackNotifier.storeMetaDataInBuffers(enable);
-}
+  /* Compute the target FPS rate. We pretend to generate frames at
+   * (max_fps_rate). */
+  int min_fps_rate, max_fps_rate;
+  mParameters.getPreviewFpsRange(&min_fps_rate, &max_fps_rate);
 
-status_t EmulatedCamera::startRecording()
-{
-    /* Callback should return a negative errno. */
-    return -mCallbackNotifier.enableVideoRecording(mParameters.getPreviewFrameRate());
-}
+  /*
+   * Take the picture now.
+   */
 
-void EmulatedCamera::stopRecording()
-{
-    mCallbackNotifier.disableVideoRecording();
-}
-
-int EmulatedCamera::isRecordingEnabled()
-{
-    return mCallbackNotifier.isVideoRecordingEnabled();
-}
-
-void EmulatedCamera::releaseRecordingFrame(const void* opaque)
-{
-    mCallbackNotifier.releaseRecordingFrame(opaque);
-}
-
-status_t EmulatedCamera::setAutoFocus()
-{
-    ALOGV("%s", __FUNCTION__);
-
-    /* Trigger auto-focus. Focus response cannot be sent directly from here. */
-    getCameraDevice()->startAutoFocus();
-
-    /* TODO: Future enhancements. */
-    return NO_ERROR;
-}
-
-status_t EmulatedCamera::cancelAutoFocus()
-{
-    ALOGV("%s", __FUNCTION__);
-
-    /* TODO: Future enhancements. */
-    return NO_ERROR;
-}
-
-status_t EmulatedCamera::takePicture()
-{
-    ALOGV("%s", __FUNCTION__);
-
-    status_t res;
-    int width, height;
-    uint32_t org_fmt;
-
-    /* Collect frame info for the picture. */
-    mParameters.getPictureSize(&width, &height);
-    const char* pix_fmt = mParameters.getPictureFormat();
-    if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_YUV420P) == 0) {
-        org_fmt = V4L2_PIX_FMT_YUV420;
-    } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_RGBA8888) == 0) {
-        org_fmt = V4L2_PIX_FMT_RGB32;
-    } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_YUV420SP) == 0) {
-        org_fmt = V4L2_PIX_FMT_NV21;
-    } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_JPEG) == 0) {
-        /* We only have JPEG converted for NV21 format. */
-        org_fmt = V4L2_PIX_FMT_NV21;
-    } else {
-        ALOGE("%s: Unsupported pixel format %s", __FUNCTION__, pix_fmt);
-        return EINVAL;
-    }
-    /* Get JPEG quality. */
-    int jpeg_quality = mParameters.getInt(CameraParameters::KEY_JPEG_QUALITY);
-    if (jpeg_quality <= 0) {
-        jpeg_quality = 90;  /* Fall back to default. */
-    }
-
-    /*
-     * Make sure preview is not running, and device is stopped before taking
-     * picture.
-     */
-
-    const bool preview_on = mPreviewWindow.isPreviewEnabled();
+  /* Start camera device for the picture frame. */
+  ALOGD("Starting camera for picture: %.4s(%s)[%dx%d]",
+        reinterpret_cast<const char*>(&org_fmt), pix_fmt, width, height);
+  res = camera_dev->startDevice(width, height, org_fmt, max_fps_rate);
+  if (res != NO_ERROR) {
     if (preview_on) {
-        doStopPreview();
-    }
-
-    /* Camera device should have been stopped when the shutter message has been
-     * enabled. */
-    EmulatedCameraDevice* const camera_dev = getCameraDevice();
-    if (camera_dev->isStarted()) {
-        ALOGW("%s: Camera device is started", __FUNCTION__);
-        camera_dev->stopDeliveringFrames();
-        camera_dev->stopDevice();
-    }
-
-    /* Compute target FPS rate.
-     * Pretend to simulate generation of (max_fps_rate) */
-    int min_fps_rate, max_fps_rate;
-    mParameters.getPreviewFpsRange(&min_fps_rate, &max_fps_rate);
-
-    /*
-     * Take the picture now.
-     */
-
-    /* Start camera device for the picture frame. */
-    ALOGD("Starting camera for picture: %.4s(%s)[%dx%d]",
-         reinterpret_cast<const char*>(&org_fmt), pix_fmt, width, height);
-    res = camera_dev->startDevice(width, height, org_fmt, max_fps_rate);
-    if (res != NO_ERROR) {
-        if (preview_on) {
-            doStartPreview();
-        }
-        return res;
-    }
-
-    /* Deliver one frame only. */
-    mCallbackNotifier.setJpegQuality(jpeg_quality);
-    mCallbackNotifier.setTakingPicture(true);
-    res = camera_dev->startDeliveringFrames(true);
-    if (res != NO_ERROR) {
-        mCallbackNotifier.setTakingPicture(false);
-        if (preview_on) {
-            doStartPreview();
-        }
+      doStartPreview();
     }
     return res;
+  }
+
+  /* Deliver one frame only. */
+  mCallbackNotifier.setJpegQuality(jpeg_quality);
+  mCallbackNotifier.setTakingPicture(true);
+  res = camera_dev->startDeliveringFrames(true);
+  if (res != NO_ERROR) {
+    mCallbackNotifier.setTakingPicture(false);
+    if (preview_on) {
+      doStartPreview();
+    }
+  }
+  return res;
 }
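
The %.4s in the ALOGD above prints the V4L2 FOURCC by reinterpreting the
32-bit format as four characters. A sketch (assuming a little-endian host,
where the FOURCC bytes sit in memory in reading order):

    uint32_t fmt = V4L2_PIX_FMT_NV21;  /* v4l2_fourcc('N', 'V', '2', '1') */
    printf("%.4s", reinterpret_cast<const char*>(&fmt));  /* prints "NV21" */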
 
-status_t EmulatedCamera::cancelPicture()
-{
-    ALOGV("%s", __FUNCTION__);
+status_t EmulatedCamera::cancelPicture() {
+  ALOGV("%s", __FUNCTION__);
 
-    return NO_ERROR;
+  return NO_ERROR;
 }
 
-status_t EmulatedCamera::setParameters(const char* parms)
-{
-    ALOGV("%s", __FUNCTION__);
-    PrintParamDiff(mParameters, parms);
+status_t EmulatedCamera::setParameters(const char* parms) {
+  ALOGV("%s", __FUNCTION__);
+  PrintParamDiff(mParameters, parms);
 
-    CameraParameters new_param;
-    String8 str8_param(parms);
-    new_param.unflatten(str8_param);
+  CameraParameters new_param;
+  String8 str8_param(parms);
+  new_param.unflatten(str8_param);
 
-    /*
-     * Check if requested dimensions are valid.
-     */
-    if (!CheckParameterValue(new_param.get(CameraParameters::KEY_FLASH_MODE),
-                             kSupportedFlashModes)) {
-      ALOGE("%s: Unsupported flash mode: %s",
-            __FUNCTION__,
-            new_param.get(CameraParameters::KEY_FLASH_MODE));
-      return -EINVAL;
+  /*
+   * Check if requested dimensions are valid.
+   */
+  if (!CheckParameterValue(new_param.get(CameraParameters::KEY_FLASH_MODE),
+                           kSupportedFlashModes)) {
+    ALOGE("%s: Unsupported flash mode: %s", __FUNCTION__,
+          new_param.get(CameraParameters::KEY_FLASH_MODE));
+    return -EINVAL;
+  }
+  if (strcmp(new_param.get(CameraParameters::KEY_FOCUS_MODE),
+             CameraParameters::FOCUS_MODE_FIXED)) {
+    ALOGE("%s: Unsupported flash mode: %s", __FUNCTION__,
+          new_param.get(CameraParameters::KEY_FOCUS_MODE));
+    return -EINVAL;
+  }
+
+  int preview_width, preview_height;
+  new_param.getPreviewSize(&preview_width, &preview_height);
+  if (preview_width <= 0 || preview_height <= 0) return -EINVAL;
+
+  /*
+   * Check for new exposure compensation parameter.
+   */
+  int new_exposure_compensation =
+      new_param.getInt(CameraParameters::KEY_EXPOSURE_COMPENSATION);
+  const int min_exposure_compensation =
+      new_param.getInt(CameraParameters::KEY_MIN_EXPOSURE_COMPENSATION);
+  const int max_exposure_compensation =
+      new_param.getInt(CameraParameters::KEY_MAX_EXPOSURE_COMPENSATION);
+
+  // Checks if the exposure compensation change is supported.
+  if ((min_exposure_compensation != 0) || (max_exposure_compensation != 0)) {
+    if (new_exposure_compensation > max_exposure_compensation) {
+      new_exposure_compensation = max_exposure_compensation;
     }
-    if (strcmp(new_param.get(CameraParameters::KEY_FOCUS_MODE),
-               CameraParameters::FOCUS_MODE_FIXED)) {
-      ALOGE("%s: Unsupported flash mode: %s",
-            __FUNCTION__,
-            new_param.get(CameraParameters::KEY_FOCUS_MODE));
-      return -EINVAL;
+    if (new_exposure_compensation < min_exposure_compensation) {
+      new_exposure_compensation = min_exposure_compensation;
     }
 
-    int preview_width, preview_height;
-    new_param.getPreviewSize(&preview_width, &preview_height);
-    if (preview_width <= 0 || preview_height <= 0) return -EINVAL;
+    const int current_exposure_compensation =
+        mParameters.getInt(CameraParameters::KEY_EXPOSURE_COMPENSATION);
+    if (current_exposure_compensation != new_exposure_compensation) {
+      const float exposure_value =
+          new_exposure_compensation *
+          new_param.getFloat(CameraParameters::KEY_EXPOSURE_COMPENSATION_STEP);
 
-
-    /*
-     * Check for new exposure compensation parameter.
-     */
-    int new_exposure_compensation = new_param.getInt(
-            CameraParameters::KEY_EXPOSURE_COMPENSATION);
-    const int min_exposure_compensation = new_param.getInt(
-            CameraParameters::KEY_MIN_EXPOSURE_COMPENSATION);
-    const int max_exposure_compensation = new_param.getInt(
-            CameraParameters::KEY_MAX_EXPOSURE_COMPENSATION);
-
-    // Checks if the exposure compensation change is supported.
-    if ((min_exposure_compensation != 0) || (max_exposure_compensation != 0)) {
-        if (new_exposure_compensation > max_exposure_compensation) {
-            new_exposure_compensation = max_exposure_compensation;
-        }
-        if (new_exposure_compensation < min_exposure_compensation) {
-            new_exposure_compensation = min_exposure_compensation;
-        }
-
-        const int current_exposure_compensation = mParameters.getInt(
-                CameraParameters::KEY_EXPOSURE_COMPENSATION);
-        if (current_exposure_compensation != new_exposure_compensation) {
-            const float exposure_value = new_exposure_compensation *
-                    new_param.getFloat(
-                            CameraParameters::KEY_EXPOSURE_COMPENSATION_STEP);
-
-            getCameraDevice()->setExposureCompensation(
-                    exposure_value);
-        }
+      getCameraDevice()->setExposureCompensation(exposure_value);
     }
+  }
 
-    const char* new_white_balance = new_param.get(
-            CameraParameters::KEY_WHITE_BALANCE);
-    const char* supported_white_balance = new_param.get(
-            CameraParameters::KEY_SUPPORTED_WHITE_BALANCE);
+  const char* new_white_balance =
+      new_param.get(CameraParameters::KEY_WHITE_BALANCE);
+  const char* supported_white_balance =
+      new_param.get(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE);
 
-    if ((supported_white_balance != NULL) && (new_white_balance != NULL) &&
-        (strstr(supported_white_balance, new_white_balance) != NULL)) {
-
-        const char* current_white_balance = mParameters.get(
-                CameraParameters::KEY_WHITE_BALANCE);
-        if ((current_white_balance == NULL) ||
-            (strcmp(current_white_balance, new_white_balance) != 0)) {
-            ALOGV("Setting white balance to %s", new_white_balance);
-            getCameraDevice()->setWhiteBalanceMode(new_white_balance);
-        }
+  if ((supported_white_balance != NULL) && (new_white_balance != NULL) &&
+      (strstr(supported_white_balance, new_white_balance) != NULL)) {
+    const char* current_white_balance =
+        mParameters.get(CameraParameters::KEY_WHITE_BALANCE);
+    if ((current_white_balance == NULL) ||
+        (strcmp(current_white_balance, new_white_balance) != 0)) {
+      ALOGV("Setting white balance to %s", new_white_balance);
+      getCameraDevice()->setWhiteBalanceMode(new_white_balance);
     }
+  }
 
-    mParameters = new_param;
+  mParameters = new_param;
 
-    return NO_ERROR;
+  return NO_ERROR;
 }
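
With the defaults set in Initialize() (step 0.5, range [-6, 6]), the
exposure-compensation handling above works out as follows; a sketch with a
hypothetical requested index of 8:

    int index = 8;             /* requested by the application */
    if (index > 6) index = 6;  /* clamped to KEY_MAX_EXPOSURE_COMPENSATION */
    if (index < -6) index = -6;
    const float exposure_value = index * 0.5f;  /* 3.0 EV */
    /* 3.0 is what reaches getCameraDevice()->setExposureCompensation(). */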
 
 /* A dumb variable indicating "no params" / error on the exit from
  * EmulatedCamera::getParameters(). */
 static char lNoParam = '\0';
-char* EmulatedCamera::getParameters()
-{
-    String8 params(mParameters.flatten());
-    char* ret_str =
-        reinterpret_cast<char*>(malloc(sizeof(char) * (params.length()+1)));
-    memset(ret_str, 0, params.length()+1);
-    if (ret_str != NULL) {
-        strncpy(ret_str, params.string(), params.length()+1);
-        return ret_str;
-    } else {
-        ALOGE("%s: Unable to allocate string for %s", __FUNCTION__, params.string());
-        /* Apparently, we can't return NULL fron this routine. */
-        return &lNoParam;
-    }
+char* EmulatedCamera::getParameters() {
+  String8 params(mParameters.flatten());
+  char* ret_str =
+      reinterpret_cast<char*>(malloc(sizeof(char) * (params.length() + 1)));
+  if (ret_str != NULL) {
+    /* Zero first so the copy below is always NUL-terminated. */
+    memset(ret_str, 0, params.length() + 1);
+    strncpy(ret_str, params.string(), params.length() + 1);
+    return ret_str;
+  } else {
+    ALOGE("%s: Unable to allocate string for %s", __FUNCTION__,
+          params.string());
+    /* Apparently, we can't return NULL from this routine. */
+    return &lNoParam;
+  }
+}
 
-void EmulatedCamera::putParameters(char* params)
-{
-    /* This method simply frees parameters allocated in getParameters(). */
-    if (params != NULL && params != &lNoParam) {
-        free(params);
-    }
+void EmulatedCamera::putParameters(char* params) {
+  /* This method simply frees parameters allocated in getParameters(). */
+  if (params != NULL && params != &lNoParam) {
+    free(params);
+  }
 }
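
The two routines above form an allocate/release pair. A sketch of the
expected call pattern ('camera' stands in for an EmulatedCamera instance):

    char* params = camera->getParameters();  /* heap copy, or &lNoParam on OOM */
    /* ... parse the flattened parameter string ... */
    camera->putParameters(params);  /* frees heap strings, ignores &lNoParam */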
 
-status_t EmulatedCamera::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2)
-{
-    ALOGV("%s: cmd = %d, arg1 = %d, arg2 = %d", __FUNCTION__, cmd, arg1, arg2);
+status_t EmulatedCamera::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2) {
+  ALOGV("%s: cmd = %d, arg1 = %d, arg2 = %d", __FUNCTION__, cmd, arg1, arg2);
 
-    switch (cmd) {
-      case CAMERA_CMD_START_FACE_DETECTION:
-      case CAMERA_CMD_STOP_FACE_DETECTION:
-        return -EINVAL;
-    }
+  switch (cmd) {
+    case CAMERA_CMD_START_FACE_DETECTION:
+    case CAMERA_CMD_STOP_FACE_DETECTION:
+      return -EINVAL;
+  }
 
-    /* TODO: Future enhancements. */
-    return 0;
+  /* TODO: Future enhancements. */
+  return 0;
 }
 
-void EmulatedCamera::releaseCamera()
-{
-    ALOGV("%s", __FUNCTION__);
+void EmulatedCamera::releaseCamera() {
+  ALOGV("%s", __FUNCTION__);
 
-    cleanupCamera();
+  cleanupCamera();
 }
 
-status_t EmulatedCamera::dumpCamera(int fd)
-{
-    ALOGV("%s", __FUNCTION__);
+status_t EmulatedCamera::dumpCamera(int fd) {
+  ALOGV("%s", __FUNCTION__);
 
-    /* TODO: Future enhancements. */
-    return -EINVAL;
+  /* TODO: Future enhancements. */
+  return -EINVAL;
 }
 
 /****************************************************************************
  * Preview management.
  ***************************************************************************/
 
-status_t EmulatedCamera::doStartPreview()
-{
-    ALOGV("%s", __FUNCTION__);
+status_t EmulatedCamera::doStartPreview() {
+  ALOGV("%s", __FUNCTION__);
 
-    EmulatedCameraDevice* camera_dev = getCameraDevice();
-    if (camera_dev->isStarted()) {
-        camera_dev->stopDeliveringFrames();
-        camera_dev->stopDevice();
-    }
+  EmulatedCameraDevice* camera_dev = getCameraDevice();
+  if (camera_dev->isStarted()) {
+    camera_dev->stopDeliveringFrames();
+    camera_dev->stopDevice();
+  }
 
-    status_t res = mPreviewWindow.startPreview();
-    if (res != NO_ERROR) {
-        return res;
-    }
-
-    /* Make sure camera device is connected. */
-    if (!camera_dev->isConnected()) {
-        res = camera_dev->connectDevice();
-        if (res != NO_ERROR) {
-            mPreviewWindow.stopPreview();
-            return res;
-        }
-    }
-
-    int width, height;
-    /* Lets see what should we use for frame width, and height. */
-    if (mParameters.get(CameraParameters::KEY_VIDEO_SIZE) != NULL) {
-        mParameters.getVideoSize(&width, &height);
-    } else {
-        mParameters.getPreviewSize(&width, &height);
-    }
-    /* Lets see what should we use for the frame pixel format. Note that there
-     * are two parameters that define pixel formats for frames sent to the
-     * application via notification callbacks:
-     * - KEY_VIDEO_FRAME_FORMAT, that is used when recording video, and
-     * - KEY_PREVIEW_FORMAT, that is used for preview frame notification.
-     * We choose one or the other, depending on "recording-hint" property set by
-     * the framework that indicating intention: video, or preview. */
-    const char* pix_fmt = NULL;
-    const char* is_video = mParameters.get(EmulatedCamera::RECORDING_HINT_KEY);
-    if (is_video == NULL) {
-        is_video = CameraParameters::FALSE;
-    }
-    if (strcmp(is_video, CameraParameters::TRUE) == 0) {
-        /* Video recording is requested. Lets see if video frame format is set. */
-        pix_fmt = mParameters.get(CameraParameters::KEY_VIDEO_FRAME_FORMAT);
-    }
-    /* If this was not video recording, or video frame format is not set, lets
-     * use preview pixel format for the main framebuffer. */
-    if (pix_fmt == NULL) {
-        pix_fmt = mParameters.getPreviewFormat();
-    }
-    if (pix_fmt == NULL) {
-        ALOGE("%s: Unable to obtain video format", __FUNCTION__);
-        mPreviewWindow.stopPreview();
-        return EINVAL;
-    }
-
-    /* Convert framework's pixel format to the FOURCC one. */
-    uint32_t org_fmt;
-    if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_YUV420P) == 0) {
-        org_fmt = V4L2_PIX_FMT_YUV420;
-    } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_RGBA8888) == 0) {
-        org_fmt = V4L2_PIX_FMT_RGB32;
-    } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_YUV420SP) == 0) {
-        org_fmt = V4L2_PIX_FMT_NV21;
-    } else {
-        ALOGE("%s: Unsupported pixel format %s", __FUNCTION__, pix_fmt);
-        mPreviewWindow.stopPreview();
-        return EINVAL;
-    }
-
-    /* Fetch the desired frame rate. */
-    int min_fps_rate, max_fps_rate;
-    mParameters.getPreviewFpsRange(&min_fps_rate, &max_fps_rate);
-
-    ALOGD("Starting camera: %dx%d -> %.4s(%s)",
-         width, height, reinterpret_cast<const char*>(&org_fmt), pix_fmt);
-    res = camera_dev->startDevice(width, height, org_fmt, max_fps_rate);
-    if (res != NO_ERROR) {
-        mPreviewWindow.stopPreview();
-        return res;
-    }
-
-    res = camera_dev->startDeliveringFrames(false);
-    if (res != NO_ERROR) {
-        camera_dev->stopDevice();
-        mPreviewWindow.stopPreview();
-    }
-
+  status_t res = mPreviewWindow.startPreview();
+  if (res != NO_ERROR) {
     return res;
+  }
+
+  /* Make sure camera device is connected. */
+  if (!camera_dev->isConnected()) {
+    res = camera_dev->connectDevice();
+    if (res != NO_ERROR) {
+      mPreviewWindow.stopPreview();
+      return res;
+    }
+  }
+
+  int width, height;
+  /* Let's see what we should use for frame width and height. */
+  if (mParameters.get(CameraParameters::KEY_VIDEO_SIZE) != NULL) {
+    mParameters.getVideoSize(&width, &height);
+  } else {
+    mParameters.getPreviewSize(&width, &height);
+  }
+  /* Let's see what we should use for the frame pixel format. Note that there
+   * are two parameters that define pixel formats for frames sent to the
+   * application via notification callbacks:
+   * - KEY_VIDEO_FRAME_FORMAT, which is used when recording video, and
+   * - KEY_PREVIEW_FORMAT, which is used for preview frame notification.
+   * We choose one or the other depending on the "recording-hint" property set
+   * by the framework to indicate its intention: video, or preview. */
+  const char* pix_fmt = NULL;
+  const char* is_video = mParameters.get(EmulatedCamera::RECORDING_HINT_KEY);
+  if (is_video == NULL) {
+    is_video = CameraParameters::FALSE;
+  }
+  if (strcmp(is_video, CameraParameters::TRUE) == 0) {
+    /* Video recording is requested. Let's see if video frame format is set. */
+    pix_fmt = mParameters.get(CameraParameters::KEY_VIDEO_FRAME_FORMAT);
+  }
+  /* If this was not video recording, or the video frame format is not set,
+   * let's use the preview pixel format for the main framebuffer. */
+  if (pix_fmt == NULL) {
+    pix_fmt = mParameters.getPreviewFormat();
+  }
+  if (pix_fmt == NULL) {
+    ALOGE("%s: Unable to obtain video format", __FUNCTION__);
+    mPreviewWindow.stopPreview();
+    return EINVAL;
+  }
+
+  /* Convert framework's pixel format to the FOURCC one. */
+  uint32_t org_fmt;
+  if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_YUV420P) == 0) {
+    org_fmt = V4L2_PIX_FMT_YUV420;
+  } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_RGBA8888) == 0) {
+    org_fmt = V4L2_PIX_FMT_RGB32;
+  } else if (strcmp(pix_fmt, CameraParameters::PIXEL_FORMAT_YUV420SP) == 0) {
+    org_fmt = V4L2_PIX_FMT_NV21;
+  } else {
+    ALOGE("%s: Unsupported pixel format %s", __FUNCTION__, pix_fmt);
+    mPreviewWindow.stopPreview();
+    return EINVAL;
+  }
+
+  /* Fetch the desired frame rate. */
+  int min_fps_rate, max_fps_rate;
+  mParameters.getPreviewFpsRange(&min_fps_rate, &max_fps_rate);
+
+  ALOGD("Starting camera: %dx%d -> %.4s(%s)", width, height,
+        reinterpret_cast<const char*>(&org_fmt), pix_fmt);
+  res = camera_dev->startDevice(width, height, org_fmt, max_fps_rate);
+  if (res != NO_ERROR) {
+    mPreviewWindow.stopPreview();
+    return res;
+  }
+
+  res = camera_dev->startDeliveringFrames(false);
+  if (res != NO_ERROR) {
+    camera_dev->stopDevice();
+    mPreviewWindow.stopPreview();
+  }
+
+  return res;
 }
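A note on the ALOGD above: it can print org_fmt with "%.4s" because V4L2 pixel formats are FOURCC codes, four ASCII bytes packed into a uint32_t. A self-contained sketch (assumes a little-endian target, as the cast in the patch does):

#include <cstdint>
#include <cstdio>

/* Pack four characters into a FOURCC code, low byte first. */
static constexpr uint32_t fourcc(char a, char b, char c, char d) {
  return uint32_t(a) | (uint32_t(b) << 8) | (uint32_t(c) << 16) |
         (uint32_t(d) << 24);
}

int main() {
  uint32_t org_fmt = fourcc('Y', 'U', '1', '2');  /* V4L2_PIX_FMT_YUV420 */
  printf("%.4s\n", reinterpret_cast<const char*>(&org_fmt));  /* "YU12" */
  return 0;
}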
 
-status_t EmulatedCamera::doStopPreview()
-{
-    ALOGV("%s", __FUNCTION__);
+status_t EmulatedCamera::doStopPreview() {
+  ALOGV("%s", __FUNCTION__);
 
-    status_t res = NO_ERROR;
-    if (mPreviewWindow.isPreviewEnabled()) {
-        /* Stop the camera. */
-        if (getCameraDevice()->isStarted()) {
-            getCameraDevice()->stopDeliveringFrames();
-            res = getCameraDevice()->stopDevice();
-        }
-
-        if (res == NO_ERROR) {
-            /* Disable preview as well. */
-            mPreviewWindow.stopPreview();
-        }
+  status_t res = NO_ERROR;
+  if (mPreviewWindow.isPreviewEnabled()) {
+    /* Stop the camera. */
+    if (getCameraDevice()->isStarted()) {
+      getCameraDevice()->stopDeliveringFrames();
+      res = getCameraDevice()->stopDevice();
     }
 
-    return NO_ERROR;
+    if (res == NO_ERROR) {
+      /* Disable preview as well. */
+      mPreviewWindow.stopPreview();
+    }
+  }
+
+  return NO_ERROR;
 }
 
 /****************************************************************************
  * Private API.
  ***************************************************************************/
 
-status_t EmulatedCamera::cleanupCamera()
-{
-    status_t res = NO_ERROR;
+status_t EmulatedCamera::cleanupCamera() {
+  status_t res = NO_ERROR;
 
-    /* If preview is running - stop it. */
-    res = doStopPreview();
-    if (res != NO_ERROR) {
+  /* If preview is running - stop it. */
+  res = doStopPreview();
+  if (res != NO_ERROR) {
+    return -res;
+  }
+
+  /* Stop and disconnect the camera device. */
+  EmulatedCameraDevice* const camera_dev = getCameraDevice();
+  if (camera_dev != NULL) {
+    if (camera_dev->isStarted()) {
+      camera_dev->stopDeliveringFrames();
+      res = camera_dev->stopDevice();
+      if (res != NO_ERROR) {
         return -res;
+      }
     }
-
-    /* Stop and disconnect the camera device. */
-    EmulatedCameraDevice* const camera_dev = getCameraDevice();
-    if (camera_dev != NULL) {
-        if (camera_dev->isStarted()) {
-            camera_dev->stopDeliveringFrames();
-            res = camera_dev->stopDevice();
-            if (res != NO_ERROR) {
-                return -res;
-            }
-        }
-        if (camera_dev->isConnected()) {
-            res = camera_dev->disconnectDevice();
-            if (res != NO_ERROR) {
-                return -res;
-            }
-        }
+    if (camera_dev->isConnected()) {
+      res = camera_dev->disconnectDevice();
+      if (res != NO_ERROR) {
+        return -res;
+      }
     }
+  }
 
-    mCallbackNotifier.cleanupCBNotifier();
+  mCallbackNotifier.cleanupCBNotifier();
 
-    return NO_ERROR;
+  return NO_ERROR;
 }
 
 /****************************************************************************
@@ -807,256 +754,232 @@
  ***************************************************************************/
 
 int EmulatedCamera::set_preview_window(struct camera_device* dev,
-                                       struct preview_stream_ops* window)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return -EINVAL;
-    }
-    return ec->setPreviewWindow(window);
+                                       struct preview_stream_ops* window) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return -EINVAL;
+  }
+  return ec->setPreviewWindow(window);
 }
 
 void EmulatedCamera::set_callbacks(
-        struct camera_device* dev,
-        camera_notify_callback notify_cb,
-        camera_data_callback data_cb,
-        camera_data_timestamp_callback data_cb_timestamp,
-        camera_request_memory get_memory,
-        void* user)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return;
-    }
-    ec->setCallbacks(notify_cb, data_cb, data_cb_timestamp, get_memory, user);
+    struct camera_device* dev, camera_notify_callback notify_cb,
+    camera_data_callback data_cb,
+    camera_data_timestamp_callback data_cb_timestamp,
+    camera_request_memory get_memory, void* user) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return;
+  }
+  ec->setCallbacks(notify_cb, data_cb, data_cb_timestamp, get_memory, user);
 }
 
-void EmulatedCamera::enable_msg_type(struct camera_device* dev, int32_t msg_type)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return;
-    }
-    ec->enableMsgType(msg_type);
+void EmulatedCamera::enable_msg_type(struct camera_device* dev,
+                                     int32_t msg_type) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return;
+  }
+  ec->enableMsgType(msg_type);
 }
 
-void EmulatedCamera::disable_msg_type(struct camera_device* dev, int32_t msg_type)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return;
-    }
-    ec->disableMsgType(msg_type);
+void EmulatedCamera::disable_msg_type(struct camera_device* dev,
+                                      int32_t msg_type) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return;
+  }
+  ec->disableMsgType(msg_type);
 }
 
-int EmulatedCamera::msg_type_enabled(struct camera_device* dev, int32_t msg_type)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return -EINVAL;
-    }
-    return ec->isMsgTypeEnabled(msg_type);
+int EmulatedCamera::msg_type_enabled(struct camera_device* dev,
+                                     int32_t msg_type) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return -EINVAL;
+  }
+  return ec->isMsgTypeEnabled(msg_type);
 }
 
-int EmulatedCamera::start_preview(struct camera_device* dev)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return -EINVAL;
-    }
-    return ec->startPreview();
+int EmulatedCamera::start_preview(struct camera_device* dev) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return -EINVAL;
+  }
+  return ec->startPreview();
 }
 
-void EmulatedCamera::stop_preview(struct camera_device* dev)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return;
-    }
-    ec->stopPreview();
+void EmulatedCamera::stop_preview(struct camera_device* dev) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return;
+  }
+  ec->stopPreview();
 }
 
-int EmulatedCamera::preview_enabled(struct camera_device* dev)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return -EINVAL;
-    }
-    return ec->isPreviewEnabled();
+int EmulatedCamera::preview_enabled(struct camera_device* dev) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return -EINVAL;
+  }
+  return ec->isPreviewEnabled();
 }
 
 int EmulatedCamera::store_meta_data_in_buffers(struct camera_device* dev,
-                                               int enable)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return -EINVAL;
-    }
-    return ec->storeMetaDataInBuffers(enable);
+                                               int enable) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return -EINVAL;
+  }
+  return ec->storeMetaDataInBuffers(enable);
 }
 
-int EmulatedCamera::start_recording(struct camera_device* dev)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return -EINVAL;
-    }
-    return ec->startRecording();
+int EmulatedCamera::start_recording(struct camera_device* dev) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return -EINVAL;
+  }
+  return ec->startRecording();
 }
 
-void EmulatedCamera::stop_recording(struct camera_device* dev)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return;
-    }
-    ec->stopRecording();
+void EmulatedCamera::stop_recording(struct camera_device* dev) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return;
+  }
+  ec->stopRecording();
 }
 
-int EmulatedCamera::recording_enabled(struct camera_device* dev)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return -EINVAL;
-    }
-    return ec->isRecordingEnabled();
+int EmulatedCamera::recording_enabled(struct camera_device* dev) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return -EINVAL;
+  }
+  return ec->isRecordingEnabled();
 }
 
 void EmulatedCamera::release_recording_frame(struct camera_device* dev,
-                                             const void* opaque)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return;
-    }
-    ec->releaseRecordingFrame(opaque);
+                                             const void* opaque) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return;
+  }
+  ec->releaseRecordingFrame(opaque);
 }
 
-int EmulatedCamera::auto_focus(struct camera_device* dev)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return -EINVAL;
-    }
-    return ec->setAutoFocus();
+int EmulatedCamera::auto_focus(struct camera_device* dev) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return -EINVAL;
+  }
+  return ec->setAutoFocus();
 }
 
-int EmulatedCamera::cancel_auto_focus(struct camera_device* dev)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return -EINVAL;
-    }
-    return ec->cancelAutoFocus();
+int EmulatedCamera::cancel_auto_focus(struct camera_device* dev) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return -EINVAL;
+  }
+  return ec->cancelAutoFocus();
 }
 
-int EmulatedCamera::take_picture(struct camera_device* dev)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return -EINVAL;
-    }
-    return ec->takePicture();
+int EmulatedCamera::take_picture(struct camera_device* dev) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return -EINVAL;
+  }
+  return ec->takePicture();
 }
 
-int EmulatedCamera::cancel_picture(struct camera_device* dev)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return -EINVAL;
-    }
-    return ec->cancelPicture();
+int EmulatedCamera::cancel_picture(struct camera_device* dev) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return -EINVAL;
+  }
+  return ec->cancelPicture();
 }
 
-int EmulatedCamera::set_parameters(struct camera_device* dev, const char* parms)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return -EINVAL;
-    }
-    return ec->setParameters(parms);
+int EmulatedCamera::set_parameters(struct camera_device* dev,
+                                   const char* parms) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return -EINVAL;
+  }
+  return ec->setParameters(parms);
 }
 
-char* EmulatedCamera::get_parameters(struct camera_device* dev)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return NULL;
-    }
-    return ec->getParameters();
+char* EmulatedCamera::get_parameters(struct camera_device* dev) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return NULL;
+  }
+  return ec->getParameters();
 }
 
-void EmulatedCamera::put_parameters(struct camera_device* dev, char* params)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return;
-    }
-    ec->putParameters(params);
+void EmulatedCamera::put_parameters(struct camera_device* dev, char* params) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return;
+  }
+  ec->putParameters(params);
 }
 
-int EmulatedCamera::send_command(struct camera_device* dev,
-                                 int32_t cmd,
-                                 int32_t arg1,
-                                 int32_t arg2)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return -EINVAL;
-    }
-    return ec->sendCommand(cmd, arg1, arg2);
+int EmulatedCamera::send_command(struct camera_device* dev, int32_t cmd,
+                                 int32_t arg1, int32_t arg2) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return -EINVAL;
+  }
+  return ec->sendCommand(cmd, arg1, arg2);
 }
 
-void EmulatedCamera::release(struct camera_device* dev)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return;
-    }
-    ec->releaseCamera();
+void EmulatedCamera::release(struct camera_device* dev) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return;
+  }
+  ec->releaseCamera();
 }
 
-int EmulatedCamera::dump(struct camera_device* dev, int fd)
-{
-    EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return -EINVAL;
-    }
-    return ec->dumpCamera(fd);
+int EmulatedCamera::dump(struct camera_device* dev, int fd) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(dev->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return -EINVAL;
+  }
+  return ec->dumpCamera(fd);
 }
 
-int EmulatedCamera::close(struct hw_device_t* device)
-{
-    EmulatedCamera* ec =
-        reinterpret_cast<EmulatedCamera*>(reinterpret_cast<struct camera_device*>(device)->priv);
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
-        return -EINVAL;
-    }
-    return ec->closeCamera();
+int EmulatedCamera::close(struct hw_device_t* device) {
+  EmulatedCamera* ec = reinterpret_cast<EmulatedCamera*>(
+      reinterpret_cast<struct camera_device*>(device)->priv);
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
+    return -EINVAL;
+  }
+  return ec->closeCamera();
 }
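Every static callback above repeats the same recover-and-forward pattern: priv is set to `this` when the device is opened, so each C entry point can get back to the C++ object. A hedged sketch of a helper that could factor out the lookup (FromDevice is hypothetical, not in this patch):

/* Hypothetical helper: recover the C++ instance behind a C handle. */
template <typename T>
static T* FromDevice(struct camera_device* dev) {
  if (dev == NULL || dev->priv == NULL) {
    ALOGE("%s: Unexpected NULL camera device", __FUNCTION__);
    return NULL;
  }
  return reinterpret_cast<T*>(dev->priv);
}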
 
 /****************************************************************************
@@ -1086,41 +1009,39 @@
     EmulatedCamera::put_parameters,
     EmulatedCamera::send_command,
     EmulatedCamera::release,
-    EmulatedCamera::dump
-};
+    EmulatedCamera::dump};
 
 /****************************************************************************
  * Common keys
  ***************************************************************************/
 
-const char EmulatedCamera::FACING_KEY[]         = "prop-facing";
-const char EmulatedCamera::ORIENTATION_KEY[]    = "prop-orientation";
+const char EmulatedCamera::FACING_KEY[] = "prop-facing";
+const char EmulatedCamera::ORIENTATION_KEY[] = "prop-orientation";
 const char EmulatedCamera::RECORDING_HINT_KEY[] = "recording-hint";
 
 /****************************************************************************
  * Common string values
  ***************************************************************************/
 
-const char EmulatedCamera::FACING_BACK[]      = "back";
-const char EmulatedCamera::FACING_FRONT[]     = "front";
+const char EmulatedCamera::FACING_BACK[] = "back";
+const char EmulatedCamera::FACING_FRONT[] = "front";
 
 /****************************************************************************
  * Helper routines
  ***************************************************************************/
 
-static char* AddValue(const char* param, const char* val)
-{
-    const size_t len1 = strlen(param);
-    const size_t len2 = strlen(val);
-    char* ret = reinterpret_cast<char*>(malloc(len1 + len2 + 2));
-    ALOGE_IF(ret == NULL, "%s: Memory failure", __FUNCTION__);
-    if (ret != NULL) {
-        memcpy(ret, param, len1);
-        ret[len1] = ',';
-        memcpy(ret + len1 + 1, val, len2);
-        ret[len1 + len2 + 1] = '\0';
-    }
-    return ret;
+static char* AddValue(const char* param, const char* val) {
+  const size_t len1 = strlen(param);
+  const size_t len2 = strlen(val);
+  char* ret = reinterpret_cast<char*>(malloc(len1 + len2 + 2));
+  ALOGE_IF(ret == NULL, "%s: Memory failure", __FUNCTION__);
+  if (ret != NULL) {
+    memcpy(ret, param, len1);
+    ret[len1] = ',';
+    memcpy(ret + len1 + 1, val, len2);
+    ret[len1 + len2 + 1] = '\0';
+  }
+  return ret;
 }
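AddValue() exists to grow the comma-separated value lists that CameraParameters uses for "supported-x" keys; the caller owns the returned buffer. A hedged usage sketch with stock CameraParameters constants (the surrounding setup is assumed, not shown in this patch):

char* modes = AddValue(CameraParameters::WHITE_BALANCE_AUTO,
                       CameraParameters::WHITE_BALANCE_DAYLIGHT);
if (modes != NULL) {
  /* Yields "auto,daylight". */
  mParameters.set(CameraParameters::KEY_SUPPORTED_WHITE_BALANCE, modes);
  free(modes);
}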
 
 /****************************************************************************
@@ -1129,34 +1050,34 @@
 
 #if DEBUG_PARAM
 static void PrintParamDiff(const CameraParameters& current,
-                            const char* new_par)
-{
-    char tmp[2048];
-    const char* wrk = new_par;
+                           const char* new_par) {
+  char tmp[2048];
+  const char* wrk = new_par;
 
-    /* Divided with ';' */
-    const char* next = strchr(wrk, ';');
-    while (next != NULL) {
-        snprintf(tmp, sizeof(tmp), "%.*s", (int)(intptr_t)(next-wrk), wrk);
-        /* in the form key=value */
-        char* val = strchr(tmp, '=');
-        if (val != NULL) {
-            *val = '\0'; val++;
-            const char* in_current = current.get(tmp);
-            if (in_current != NULL) {
-                if (strcmp(in_current, val)) {
-                    ALOGD("=== Value changed: %s: %s -> %s", tmp, in_current, val);
-                }
-            } else {
-                ALOGD("+++ New parameter: %s=%s", tmp, val);
-            }
-        } else {
-            ALOGW("No value separator in %s", tmp);
+  /* Parameters are separated by ';'. */
+  const char* next = strchr(wrk, ';');
+  while (next != NULL) {
+    snprintf(tmp, sizeof(tmp), "%.*s", (int)(intptr_t)(next - wrk), wrk);
+    /* in the form key=value */
+    char* val = strchr(tmp, '=');
+    if (val != NULL) {
+      *val = '\0';
+      val++;
+      const char* in_current = current.get(tmp);
+      if (in_current != NULL) {
+        if (strcmp(in_current, val)) {
+          ALOGD("=== Value changed: %s: %s -> %s", tmp, in_current, val);
         }
-        wrk = next + 1;
-        next = strchr(wrk, ';');
+      } else {
+        ALOGD("+++ New parameter: %s=%s", tmp, val);
+      }
+    } else {
+      ALOGW("No value separator in %s", tmp);
     }
+    wrk = next + 1;
+    next = strchr(wrk, ';');
+  }
 }
-#endif  /* DEBUG_PARAM */
+#endif /* DEBUG_PARAM */
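For reference, PrintParamDiff() walks the flattened form that CameraParameters::flatten() produces: semicolon-separated key=value pairs. Note the loop above only inspects segments terminated by ';', so a final pair without a trailing separator is skipped. A hypothetical input:

/* Illustrative flattened parameter string (values are made up). */
static const char kExampleParams[] =
    "preview-size=640x480;preview-format=yuv420sp;recording-hint=true;";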
 
 }; /* namespace android */
diff --git a/guest/hals/camera/EmulatedCamera.h b/guest/hals/camera/EmulatedCamera.h
index 9976250..5c16d31 100644
--- a/guest/hals/camera/EmulatedCamera.h
+++ b/guest/hals/camera/EmulatedCamera.h
@@ -27,10 +27,10 @@
  */
 
 #include <camera/CameraParameters.h>
+#include "CallbackNotifier.h"
 #include "EmulatedBaseCamera.h"
 #include "EmulatedCameraDevice.h"
 #include "PreviewWindow.h"
-#include "CallbackNotifier.h"
 
 namespace android {
 
@@ -44,367 +44,361 @@
  * response to hw_module_methods_t::open, and camera_device::close callbacks.
  */
 class EmulatedCamera : public camera_device, public EmulatedBaseCamera {
-public:
-    /* Constructs EmulatedCamera instance.
-     * Param:
-     *  cameraId - Zero based camera identifier, which is an index of the camera
-     *      instance in camera factory's array.
-     *  module - Emulated camera HAL module descriptor.
-     */
-    EmulatedCamera(int cameraId,
-                   struct hw_module_t* module);
+ public:
+  /* Constructs EmulatedCamera instance.
+   * Param:
+   *  cameraId - Zero based camera identifier, which is an index of the camera
+   *      instance in camera factory's array.
+   *  module - Emulated camera HAL module descriptor.
+   */
+  EmulatedCamera(int cameraId, struct hw_module_t* module);
 
-    /* Destructs EmulatedCamera instance. */
-    virtual ~EmulatedCamera();
+  /* Destructs EmulatedCamera instance. */
+  virtual ~EmulatedCamera();
 
-    /****************************************************************************
-     * Abstract API
-     ***************************************************************************/
+  /****************************************************************************
+   * Abstract API
+   ***************************************************************************/
 
-public:
-    /* Gets emulated camera device used by this instance of the emulated camera.
-     */
-    virtual EmulatedCameraDevice* getCameraDevice() = 0;
+ public:
+  /* Gets emulated camera device used by this instance of the emulated camera.
+   */
+  virtual EmulatedCameraDevice* getCameraDevice() = 0;
 
-    /****************************************************************************
-     * Public API
-     ***************************************************************************/
+  /****************************************************************************
+   * Public API
+   ***************************************************************************/
 
-public:
-    /** Override of base class method */
-    virtual status_t Initialize(const cvd::CameraDefinition& properties);
+ public:
+  /** Override of base class method */
+  virtual status_t Initialize(const cvd::CameraDefinition& properties);
 
-    /* Next frame is available in the camera device.
-     * This is a notification callback that is invoked by the camera device when
-     * a new frame is available.
-     * Note that most likely this method is called in context of a worker thread
-     * that camera device has created for frame capturing.
-     * Param:
-     *  frame - Captured frame, or NULL if camera device didn't pull the frame
-     *      yet. If NULL is passed in this parameter use GetCurrentFrame method
-     *      of the camera device class to obtain the next frame. Also note that
-     *      the size of the frame that is passed here (as well as the frame
-     *      returned from the GetCurrentFrame method) is defined by the current
-     *      frame settings (width + height + pixel format) for the camera device.
-     * timestamp - Frame's timestamp.
-     * camera_dev - Camera device instance that delivered the frame.
-     */
-    virtual void onNextFrameAvailable(const void* frame,
-                                      nsecs_t timestamp,
-                                      EmulatedCameraDevice* camera_dev);
+  /* Next frame is available in the camera device.
+   * This is a notification callback that is invoked by the camera device when
+   * a new frame is available.
+   * Note that most likely this method is called in the context of a worker
+   * thread that the camera device has created for frame capturing.
+   * Param:
+   *  frame - Captured frame, or NULL if camera device didn't pull the frame
+   *      yet. If NULL is passed in this parameter use GetCurrentFrame method
+   *      of the camera device class to obtain the next frame. Also note that
+   *      the size of the frame that is passed here (as well as the frame
+   *      returned from the GetCurrentFrame method) is defined by the current
+   *      frame settings (width + height + pixel format) for the camera device.
+   * timestamp - Frame's timestamp.
+   * camera_dev - Camera device instance that delivered the frame.
+   */
+  virtual void onNextFrameAvailable(const void* frame, nsecs_t timestamp,
+                                    EmulatedCameraDevice* camera_dev);
 
-    /* Entry point for notifications that occur in camera device.
-     * Param:
-     *  err - CAMERA_ERROR_XXX error code.
-     */
-    virtual void onCameraDeviceError(int err);
+  /* Entry point for notifications that occur in camera device.
+   * Param:
+   *  err - CAMERA_ERROR_XXX error code.
+   */
+  virtual void onCameraDeviceError(int err);
 
-    /* Device acquired focus.
-     * This is a notification callback that is invoked by the camera device
-     * when focusing operation (requested by client) completes.
-     */
-    virtual void onCameraFocusAcquired();
+  /* Device acquired focus.
+   * This is a notification callback that is invoked by the camera device
+   * when focusing operation (requested by client) completes.
+   */
+  virtual void onCameraFocusAcquired();
 
-    /****************************************************************************
-     * Camera API implementation
-     ***************************************************************************/
+  /****************************************************************************
+   * Camera API implementation
+   ***************************************************************************/
 
-public:
-    /** Override of base class method */
-    virtual status_t connectCamera(hw_device_t** device);
+ public:
+  /** Override of base class method */
+  virtual status_t connectCamera(hw_device_t** device);
 
-    /** Override of base class method */
-    virtual status_t closeCamera();
+  /** Override of base class method */
+  virtual status_t closeCamera();
 
-    /** Override of base class method */
-    virtual status_t getCameraInfo(struct camera_info* info);
+  /** Override of base class method */
+  virtual status_t getCameraInfo(struct camera_info* info);
 
-    /** Override of base class method */
-    virtual status_t getImageMetadata(struct ImageMetadata* meta);
+  /** Override of base class method */
+  virtual status_t getImageMetadata(struct ImageMetadata* meta);
 
-    /****************************************************************************
-     * Camera API implementation.
-     * These methods are called from the camera API callback routines.
-     ***************************************************************************/
+  /****************************************************************************
+   * Camera API implementation.
+   * These methods are called from the camera API callback routines.
+   ***************************************************************************/
 
-protected:
-    /* Actual handler for camera_device_ops_t::set_preview_window callback.
-     * NOTE: When this method is called the object is locked.
-     * Note that failures in this method are reported as negave EXXX statuses.
-     */
-    virtual status_t setPreviewWindow(struct preview_stream_ops *window);
+ protected:
+  /* Actual handler for camera_device_ops_t::set_preview_window callback.
+   * NOTE: When this method is called the object is locked.
+   * Note that failures in this method are reported as negative EXXX statuses.
+   */
+  virtual status_t setPreviewWindow(struct preview_stream_ops* window);
 
-    /* Actual handler for camera_device_ops_t::set_callbacks callback.
-     * NOTE: When this method is called the object is locked.
-     */
-    virtual void setCallbacks(camera_notify_callback notify_cb,
-                              camera_data_callback data_cb,
-                              camera_data_timestamp_callback data_cb_timestamp,
-                              camera_request_memory get_memory,
-                              void* user);
+  /* Actual handler for camera_device_ops_t::set_callbacks callback.
+   * NOTE: When this method is called the object is locked.
+   */
+  virtual void setCallbacks(camera_notify_callback notify_cb,
+                            camera_data_callback data_cb,
+                            camera_data_timestamp_callback data_cb_timestamp,
+                            camera_request_memory get_memory, void* user);
 
-    /* Actual handler for camera_device_ops_t::enable_msg_type callback.
-     * NOTE: When this method is called the object is locked.
-     */
-    virtual void enableMsgType(int32_t msg_type);
+  /* Actual handler for camera_device_ops_t::enable_msg_type callback.
+   * NOTE: When this method is called the object is locked.
+   */
+  virtual void enableMsgType(int32_t msg_type);
 
-    /* Actual handler for camera_device_ops_t::disable_msg_type callback.
-     * NOTE: When this method is called the object is locked.
-     */
-    virtual void disableMsgType(int32_t msg_type);
+  /* Actual handler for camera_device_ops_t::disable_msg_type callback.
+   * NOTE: When this method is called the object is locked.
+   */
+  virtual void disableMsgType(int32_t msg_type);
 
-    /* Actual handler for camera_device_ops_t::msg_type_enabled callback.
-     * NOTE: When this method is called the object is locked.
-     * Return:
-     *  0 if message(s) is (are) disabled, != 0 if enabled.
-     */
-    virtual int isMsgTypeEnabled(int32_t msg_type);
+  /* Actual handler for camera_device_ops_t::msg_type_enabled callback.
+   * NOTE: When this method is called the object is locked.
+   * Return:
+   *  0 if message(s) is (are) disabled, != 0 if enabled.
+   */
+  virtual int isMsgTypeEnabled(int32_t msg_type);
 
-    /* Actual handler for camera_device_ops_t::start_preview callback.
-     * NOTE: When this method is called the object is locked.
-     * Note that failures in this method are reported as negave EXXX statuses.
-     */
-    virtual status_t startPreview();
+  /* Actual handler for camera_device_ops_t::start_preview callback.
+   * NOTE: When this method is called the object is locked.
+   * Note that failures in this method are reported as negative EXXX statuses.
+   */
+  virtual status_t startPreview();
 
-    /* Actual handler for camera_device_ops_t::stop_preview callback.
-     * NOTE: When this method is called the object is locked.
-     */
-    virtual void stopPreview();
+  /* Actual handler for camera_device_ops_t::stop_preview callback.
+   * NOTE: When this method is called the object is locked.
+   */
+  virtual void stopPreview();
 
-    /* Actual handler for camera_device_ops_t::preview_enabled callback.
-     * NOTE: When this method is called the object is locked.
-     * Return:
-     *  0 if preview is disabled, != 0 if enabled.
-     */
-    virtual int isPreviewEnabled();
+  /* Actual handler for camera_device_ops_t::preview_enabled callback.
+   * NOTE: When this method is called the object is locked.
+   * Return:
+   *  0 if preview is disabled, != 0 if enabled.
+   */
+  virtual int isPreviewEnabled();
 
-    /* Actual handler for camera_device_ops_t::store_meta_data_in_buffers callback.
-     * NOTE: When this method is called the object is locked.
-     * Note that failures in this method are reported as negave EXXX statuses.
-     */
-    virtual status_t storeMetaDataInBuffers(int enable);
+  /* Actual handler for camera_device_ops_t::store_meta_data_in_buffers
+   * callback. NOTE: When this method is called the object is locked. Note that
+   * failures in this method are reported as negative EXXX statuses.
+   */
+  virtual status_t storeMetaDataInBuffers(int enable);
 
-    /* Actual handler for camera_device_ops_t::start_recording callback.
-     * NOTE: When this method is called the object is locked.
-     * Note that failures in this method are reported as negave EXXX statuses.
-     */
-    virtual status_t startRecording();
+  /* Actual handler for camera_device_ops_t::start_recording callback.
+   * NOTE: When this method is called the object is locked.
+   * Note that failures in this method are reported as negative EXXX statuses.
+   */
+  virtual status_t startRecording();
 
-    /* Actual handler for camera_device_ops_t::stop_recording callback.
-     * NOTE: When this method is called the object is locked.
-     */
-    virtual void stopRecording();
+  /* Actual handler for camera_device_ops_t::stop_recording callback.
+   * NOTE: When this method is called the object is locked.
+   */
+  virtual void stopRecording();
 
-    /* Actual handler for camera_device_ops_t::recording_enabled callback.
-     * NOTE: When this method is called the object is locked.
-     * Return:
-     *  0 if recording is disabled, != 0 if enabled.
-     */
-    virtual int isRecordingEnabled();
+  /* Actual handler for camera_device_ops_t::recording_enabled callback.
+   * NOTE: When this method is called the object is locked.
+   * Return:
+   *  0 if recording is disabled, != 0 if enabled.
+   */
+  virtual int isRecordingEnabled();
 
-    /* Actual handler for camera_device_ops_t::release_recording_frame callback.
-     * NOTE: When this method is called the object is locked.
-     */
-    virtual void releaseRecordingFrame(const void* opaque);
+  /* Actual handler for camera_device_ops_t::release_recording_frame callback.
+   * NOTE: When this method is called the object is locked.
+   */
+  virtual void releaseRecordingFrame(const void* opaque);
 
-    /* Actual handler for camera_device_ops_t::auto_focus callback.
-     * NOTE: When this method is called the object is locked.
-     * Note that failures in this method are reported as negave EXXX statuses.
-     */
-    virtual status_t setAutoFocus();
+  /* Actual handler for camera_device_ops_t::auto_focus callback.
+   * NOTE: When this method is called the object is locked.
+   * Note that failures in this method are reported as negative EXXX statuses.
+   */
+  virtual status_t setAutoFocus();
 
-    /* Actual handler for camera_device_ops_t::cancel_auto_focus callback.
-     * NOTE: When this method is called the object is locked.
-     * Note that failures in this method are reported as negave EXXX statuses.
-     */
-    virtual status_t cancelAutoFocus();
+  /* Actual handler for camera_device_ops_t::cancel_auto_focus callback.
+   * NOTE: When this method is called the object is locked.
+   * Note that failures in this method are reported as negative EXXX statuses.
+   */
+  virtual status_t cancelAutoFocus();
 
-    /* Actual handler for camera_device_ops_t::take_picture callback.
-     * NOTE: When this method is called the object is locked.
-     * Note that failures in this method are reported as negave EXXX statuses.
-     */
-    virtual status_t takePicture();
+  /* Actual handler for camera_device_ops_t::take_picture callback.
+   * NOTE: When this method is called the object is locked.
+   * Note that failures in this method are reported as negative EXXX statuses.
+   */
+  virtual status_t takePicture();
 
-    /* Actual handler for camera_device_ops_t::cancel_picture callback.
-     * NOTE: When this method is called the object is locked.
-     * Note that failures in this method are reported as negave EXXX statuses.
-     */
-    virtual status_t cancelPicture();
+  /* Actual handler for camera_device_ops_t::cancel_picture callback.
+   * NOTE: When this method is called the object is locked.
+   * Note that failures in this method are reported as negative EXXX statuses.
+   */
+  virtual status_t cancelPicture();
 
-    /* Actual handler for camera_device_ops_t::set_parameters callback.
-     * NOTE: When this method is called the object is locked.
-     * Note that failures in this method are reported as negave EXXX statuses.
-     */
-    virtual status_t setParameters(const char* parms);
+  /* Actual handler for camera_device_ops_t::set_parameters callback.
+   * NOTE: When this method is called the object is locked.
+   * Note that failures in this method are reported as negative EXXX statuses.
+   */
+  virtual status_t setParameters(const char* parms);
 
-    /* Actual handler for camera_device_ops_t::get_parameters callback.
-     * NOTE: When this method is called the object is locked.
-     * Return:
-     *  Flattened parameters string. The caller will free the buffer allocated
-     *  for the string by calling camera_device_ops_t::put_parameters callback.
-     */
-    virtual char* getParameters();
+  /* Actual handler for camera_device_ops_t::get_parameters callback.
+   * NOTE: When this method is called the object is locked.
+   * Return:
+   *  Flattened parameters string. The caller will free the buffer allocated
+   *  for the string by calling camera_device_ops_t::put_parameters callback.
+   */
+  virtual char* getParameters();
 
-    /* Actual handler for camera_device_ops_t::put_parameters callback.
-     * Called to free the string returned from camera_device_ops_t::get_parameters
-     * callback. There is nothing more to it: the name of the callback is just
-     * misleading.
-     * NOTE: When this method is called the object is locked.
-     */
-    virtual void putParameters(char* params);
+  /* Actual handler for camera_device_ops_t::put_parameters callback.
+   * Called to free the string returned from camera_device_ops_t::get_parameters
+   * callback. There is nothing more to it: the name of the callback is just
+   * misleading.
+   * NOTE: When this method is called the object is locked.
+   */
+  virtual void putParameters(char* params);
 
-    /* Actual handler for camera_device_ops_t::send_command callback.
-     * NOTE: When this method is called the object is locked.
-     * Note that failures in this method are reported as negave EXXX statuses.
-     */
-    virtual status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2);
+  /* Actual handler for camera_device_ops_t::send_command callback.
+   * NOTE: When this method is called the object is locked.
+   * Note that failures in this method are reported as negative EXXX statuses.
+   */
+  virtual status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2);
 
-    /* Actual handler for camera_device_ops_t::release callback.
-     * NOTE: When this method is called the object is locked.
-     */
-    virtual void releaseCamera();
+  /* Actual handler for camera_device_ops_t::release callback.
+   * NOTE: When this method is called the object is locked.
+   */
+  virtual void releaseCamera();
 
-    /* Actual handler for camera_device_ops_t::dump callback.
-     * NOTE: When this method is called the object is locked.
-     * Note that failures in this method are reported as negave EXXX statuses.
-     */
-    virtual status_t dumpCamera(int fd);
+  /* Actual handler for camera_device_ops_t::dump callback.
+   * NOTE: When this method is called the object is locked.
+   * Note that failures in this method are reported as negative EXXX statuses.
+   */
+  virtual status_t dumpCamera(int fd);
 
-    /****************************************************************************
-     * Preview management.
-     ***************************************************************************/
+  /****************************************************************************
+   * Preview management.
+   ***************************************************************************/
 
-protected:
-    /* Starts preview.
-     * Note that when this method is called mPreviewWindow may be NULL,
-     * indicating that framework has an intention to start displaying video
-     * frames, but didn't create the preview window yet.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status on failure.
-     */
-    virtual status_t doStartPreview();
+ protected:
+  /* Starts preview.
+   * Note that when this method is called mPreviewWindow may be NULL,
+   * indicating that the framework intends to start displaying video frames,
+   * but hasn't created the preview window yet.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status on failure.
+   */
+  virtual status_t doStartPreview();
 
-    /* Stops preview.
-     * This method reverts DoStartPreview.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status on failure.
-     */
-    virtual status_t doStopPreview();
+  /* Stops preview.
+   * This method reverts doStartPreview.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status on failure.
+   */
+  virtual status_t doStopPreview();
 
-    /****************************************************************************
-     * Private API.
-     ***************************************************************************/
+  /****************************************************************************
+   * Private API.
+   ***************************************************************************/
 
-protected:
-    /* Cleans up camera when released. */
-    virtual status_t cleanupCamera();
+ protected:
+  /* Cleans up camera when released. */
+  virtual status_t cleanupCamera();
 
-    /****************************************************************************
-     * Camera API callbacks as defined by camera_device_ops structure.
-     * See hardware/libhardware/include/hardware/camera.h for information on
-     * each of these callbacks. Implemented in this class, these callbacks simply
-     * dispatch the call into an instance of EmulatedCamera class defined by the
-     * 'camera_device' parameter.
-     ***************************************************************************/
+  /****************************************************************************
+   * Camera API callbacks as defined by camera_device_ops structure.
+   * See hardware/libhardware/include/hardware/camera.h for information on
+   * each of these callbacks. Implemented in this class, these callbacks simply
+   * dispatch the call into an instance of EmulatedCamera class defined by the
+   * 'camera_device' parameter.
+   ***************************************************************************/
 
-private:
-    static int set_preview_window(struct camera_device* dev,
-                                   struct preview_stream_ops* window);
+ private:
+  static int set_preview_window(struct camera_device* dev,
+                                struct preview_stream_ops* window);
 
-    static void set_callbacks(struct camera_device* dev,
-                              camera_notify_callback notify_cb,
-                              camera_data_callback data_cb,
-                              camera_data_timestamp_callback data_cb_timestamp,
-                              camera_request_memory get_memory,
-                              void* user);
+  static void set_callbacks(struct camera_device* dev,
+                            camera_notify_callback notify_cb,
+                            camera_data_callback data_cb,
+                            camera_data_timestamp_callback data_cb_timestamp,
+                            camera_request_memory get_memory, void* user);
 
-    static void enable_msg_type(struct camera_device* dev, int32_t msg_type);
+  static void enable_msg_type(struct camera_device* dev, int32_t msg_type);
 
-    static void disable_msg_type(struct camera_device* dev, int32_t msg_type);
+  static void disable_msg_type(struct camera_device* dev, int32_t msg_type);
 
-    static int msg_type_enabled(struct camera_device* dev, int32_t msg_type);
+  static int msg_type_enabled(struct camera_device* dev, int32_t msg_type);
 
-    static int start_preview(struct camera_device* dev);
+  static int start_preview(struct camera_device* dev);
 
-    static void stop_preview(struct camera_device* dev);
+  static void stop_preview(struct camera_device* dev);
 
-    static int preview_enabled(struct camera_device* dev);
+  static int preview_enabled(struct camera_device* dev);
 
-    static int store_meta_data_in_buffers(struct camera_device* dev, int enable);
+  static int store_meta_data_in_buffers(struct camera_device* dev, int enable);
 
-    static int start_recording(struct camera_device* dev);
+  static int start_recording(struct camera_device* dev);
 
-    static void stop_recording(struct camera_device* dev);
+  static void stop_recording(struct camera_device* dev);
 
-    static int recording_enabled(struct camera_device* dev);
+  static int recording_enabled(struct camera_device* dev);
 
-    static void release_recording_frame(struct camera_device* dev,
-                                        const void* opaque);
+  static void release_recording_frame(struct camera_device* dev,
+                                      const void* opaque);
 
-    static int auto_focus(struct camera_device* dev);
+  static int auto_focus(struct camera_device* dev);
 
-    static int cancel_auto_focus(struct camera_device* dev);
+  static int cancel_auto_focus(struct camera_device* dev);
 
-    static int take_picture(struct camera_device* dev);
+  static int take_picture(struct camera_device* dev);
 
-    static int cancel_picture(struct camera_device* dev);
+  static int cancel_picture(struct camera_device* dev);
 
-    static int set_parameters(struct camera_device* dev, const char* parms);
+  static int set_parameters(struct camera_device* dev, const char* parms);
 
-    static char* get_parameters(struct camera_device* dev);
+  static char* get_parameters(struct camera_device* dev);
 
-    static void put_parameters(struct camera_device* dev, char* params);
+  static void put_parameters(struct camera_device* dev, char* params);
 
-    static int send_command(struct camera_device* dev,
-                            int32_t cmd,
-                            int32_t arg1,
-                            int32_t arg2);
+  static int send_command(struct camera_device* dev, int32_t cmd, int32_t arg1,
+                          int32_t arg2);
 
-    static void release(struct camera_device* dev);
+  static void release(struct camera_device* dev);
 
-    static int dump(struct camera_device* dev, int fd);
+  static int dump(struct camera_device* dev, int fd);
 
-    static int close(struct hw_device_t* device);
+  static int close(struct hw_device_t* device);
 
-    /****************************************************************************
-     * Data members
-     ***************************************************************************/
+  /****************************************************************************
+   * Data members
+   ***************************************************************************/
 
-protected:
-    /* Locks this instance for parameters, state, etc. change. */
-    Mutex                           mObjectLock;
+ protected:
+  /* Locks this instance for changes to parameters, state, etc. */
+  Mutex mObjectLock;
 
-    /* Camera parameters. */
-    CameraParameters                mParameters;
+  /* Camera parameters. */
+  CameraParameters mParameters;
 
-    /* Preview window. */
-    PreviewWindow                   mPreviewWindow;
+  /* Preview window. */
+  PreviewWindow mPreviewWindow;
 
-    /* Callback notifier. */
-    CallbackNotifier                mCallbackNotifier;
+  /* Callback notifier. */
+  CallbackNotifier mCallbackNotifier;
 
-private:
-    /* Registered callbacks implementing camera API. */
-    static camera_device_ops_t      mDeviceOps;
+ private:
+  /* Registered callbacks implementing camera API. */
+  static camera_device_ops_t mDeviceOps;
 
-    /****************************************************************************
-     * Common keys
-     ***************************************************************************/
+  /****************************************************************************
+   * Common keys
+   ***************************************************************************/
 
-public:
-    static const char FACING_KEY[];
-    static const char ORIENTATION_KEY[];
-    static const char RECORDING_HINT_KEY[];
+ public:
+  static const char FACING_KEY[];
+  static const char ORIENTATION_KEY[];
+  static const char RECORDING_HINT_KEY[];
 
-     /****************************************************************************
-     * Common string values
-     ***************************************************************************/
+  /****************************************************************************
+   * Common string values
+   ***************************************************************************/
 
-    /* Possible values for FACING_KEY */
-    static const char FACING_BACK[];
-    static const char FACING_FRONT[];
+  /* Possible values for FACING_KEY */
+  static const char FACING_BACK[];
+  static const char FACING_FRONT[];
 };
 
 }; /* namespace android */
 
-#endif  /* HW_EMULATOR_CAMERA_EMULATED_CAMERA_H */
+#endif /* HW_EMULATOR_CAMERA_EMULATED_CAMERA_H */
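The EmulatedCamera declarations above (start_preview, take_picture, and the rest, plus the static mDeviceOps table) follow the usual HAL trampoline pattern: the C++ object derives from the C device struct, a table of plain function pointers is registered with the framework, and each static callback recovers the instance before forwarding to a member function. A minimal sketch of that pattern, using hypothetical fake_* names rather than the real camera HAL types:

    #include <cstdio>

    // Stand-ins for the real HAL types; all names here are hypothetical.
    struct fake_device_t {
      void* priv;  // HAL convention: points back at the owning C++ object
    };

    struct fake_device_ops_t {
      int (*start_preview)(fake_device_t* dev);
    };

    class FakeCamera : public fake_device_t {
     public:
      FakeCamera() { priv = this; }

      // Instance method doing the real work.
      int startPreview() {
        std::printf("preview started\n");
        return 0;
      }

      // Static trampoline: recovers the instance and forwards the call.
      static int start_preview(fake_device_t* dev) {
        return static_cast<FakeCamera*>(dev)->startPreview();
      }

      static fake_device_ops_t sDeviceOps;
    };

    fake_device_ops_t FakeCamera::sDeviceOps = {FakeCamera::start_preview};

    int main() {
      FakeCamera cam;
      fake_device_t* dev = &cam;  // what the framework holds
      return FakeCamera::sDeviceOps.start_preview(dev);
    }

EmulatedCamera2's constructor below also stores priv = this, which allows the same round trip through the generic device fields.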
diff --git a/guest/hals/camera/EmulatedCamera2.cpp b/guest/hals/camera/EmulatedCamera2.cpp
index bc5e391..a627371 100644
--- a/guest/hals/camera/EmulatedCamera2.cpp
+++ b/guest/hals/camera/EmulatedCamera2.cpp
@@ -37,36 +37,31 @@
  *      instance in camera factory's array.
  *  module - Emulated camera HAL module descriptor.
  */
-EmulatedCamera2::EmulatedCamera2(int cameraId,
-        struct hw_module_t* module):
-        EmulatedBaseCamera(cameraId,
-                CAMERA_DEVICE_API_VERSION_2_0,
-                &common,
-                module)
-{
-    common.close = EmulatedCamera2::close;
-    ops = &sDeviceOps;
-    priv = this;
+EmulatedCamera2::EmulatedCamera2(int cameraId, struct hw_module_t *module)
+    : EmulatedBaseCamera(cameraId, CAMERA_DEVICE_API_VERSION_2_0, &common,
+                         module) {
+  common.close = EmulatedCamera2::close;
+  ops = &sDeviceOps;
+  priv = this;
 
-    mNotifyCb = NULL;
+  mNotifyCb = NULL;
 
-    mRequestQueueSrc = NULL;
-    mFrameQueueDst = NULL;
+  mRequestQueueSrc = NULL;
+  mFrameQueueDst = NULL;
 
-    mVendorTagOps.get_camera_vendor_section_name =
-            EmulatedCamera2::get_camera_vendor_section_name;
-    mVendorTagOps.get_camera_vendor_tag_name =
-            EmulatedCamera2::get_camera_vendor_tag_name;
-    mVendorTagOps.get_camera_vendor_tag_type =
-            EmulatedCamera2::get_camera_vendor_tag_type;
-    mVendorTagOps.parent = this;
+  mVendorTagOps.get_camera_vendor_section_name =
+      EmulatedCamera2::get_camera_vendor_section_name;
+  mVendorTagOps.get_camera_vendor_tag_name =
+      EmulatedCamera2::get_camera_vendor_tag_name;
+  mVendorTagOps.get_camera_vendor_tag_type =
+      EmulatedCamera2::get_camera_vendor_tag_type;
+  mVendorTagOps.parent = this;
 
-    mStatusPresent = true;
+  mStatusPresent = true;
 }
 
 /* Destructs EmulatedCamera2 instance. */
-EmulatedCamera2::~EmulatedCamera2() {
-}
+EmulatedCamera2::~EmulatedCamera2() {}
 
 /****************************************************************************
  * Abstract API
@@ -76,25 +71,23 @@
  * Public API
  ***************************************************************************/
 
-status_t EmulatedCamera2::Initialize(const cvd::CameraDefinition& props) {
-    return NO_ERROR;
+status_t EmulatedCamera2::Initialize(const cvd::CameraDefinition &props) {
+  return NO_ERROR;
 }
 
 /****************************************************************************
  * Camera API implementation
  ***************************************************************************/
 
-status_t EmulatedCamera2::connectCamera(hw_device_t** device) {
-    *device = &common;
-    return NO_ERROR;
+status_t EmulatedCamera2::connectCamera(hw_device_t **device) {
+  *device = &common;
+  return NO_ERROR;
 }
 
-status_t EmulatedCamera2::closeCamera() {
-    return NO_ERROR;
-}
+status_t EmulatedCamera2::closeCamera() { return NO_ERROR; }
 
-status_t EmulatedCamera2::getCameraInfo(struct camera_info* info) {
-    return EmulatedBaseCamera::getCameraInfo(info);
+status_t EmulatedCamera2::getCameraInfo(struct camera_info *info) {
+  return EmulatedBaseCamera::getCameraInfo(info);
 }
 
 /****************************************************************************
@@ -104,103 +97,75 @@
 
 /** Request input queue */
 
-int EmulatedCamera2::requestQueueNotify() {
-    return INVALID_OPERATION;
-}
+int EmulatedCamera2::requestQueueNotify() { return INVALID_OPERATION; }
 
 /** Count of requests in flight */
-int EmulatedCamera2::getInProgressCount() {
-    return INVALID_OPERATION;
-}
+int EmulatedCamera2::getInProgressCount() { return INVALID_OPERATION; }
 
 /** Cancel all captures in flight */
-int EmulatedCamera2::flushCapturesInProgress() {
-    return INVALID_OPERATION;
-}
+int EmulatedCamera2::flushCapturesInProgress() { return INVALID_OPERATION; }
 
 /** Construct a default request for a given use case */
-int EmulatedCamera2::constructDefaultRequest(
-        int request_template,
-        camera_metadata_t **request) {
-    return INVALID_OPERATION;
+int EmulatedCamera2::constructDefaultRequest(int request_template,
+                                             camera_metadata_t **request) {
+  return INVALID_OPERATION;
 }
 
 /** Output stream creation and management */
 
-int EmulatedCamera2::allocateStream(
-        uint32_t width,
-        uint32_t height,
-        int format,
-        const camera2_stream_ops_t *stream_ops,
-        uint32_t *stream_id,
-        uint32_t *format_actual,
-        uint32_t *usage,
-        uint32_t *max_buffers) {
-    return INVALID_OPERATION;
+int EmulatedCamera2::allocateStream(uint32_t width, uint32_t height, int format,
+                                    const camera2_stream_ops_t *stream_ops,
+                                    uint32_t *stream_id,
+                                    uint32_t *format_actual, uint32_t *usage,
+                                    uint32_t *max_buffers) {
+  return INVALID_OPERATION;
 }
 
-int EmulatedCamera2::registerStreamBuffers(
-        uint32_t stream_id,
-        int num_buffers,
-        buffer_handle_t *buffers) {
-    return INVALID_OPERATION;
+int EmulatedCamera2::registerStreamBuffers(uint32_t stream_id, int num_buffers,
+                                           buffer_handle_t *buffers) {
+  return INVALID_OPERATION;
 }
 
-
 int EmulatedCamera2::releaseStream(uint32_t stream_id) {
-    return INVALID_OPERATION;
+  return INVALID_OPERATION;
 }
 
 /** Reprocessing input stream management */
 
 int EmulatedCamera2::allocateReprocessStream(
-        uint32_t width,
-        uint32_t height,
-        uint32_t format,
-        const camera2_stream_in_ops_t *reprocess_stream_ops,
-        uint32_t *stream_id,
-        uint32_t *consumer_usage,
-        uint32_t *max_buffers) {
-    return INVALID_OPERATION;
+    uint32_t width, uint32_t height, uint32_t format,
+    const camera2_stream_in_ops_t *reprocess_stream_ops, uint32_t *stream_id,
+    uint32_t *consumer_usage, uint32_t *max_buffers) {
+  return INVALID_OPERATION;
 }
 
 int EmulatedCamera2::allocateReprocessStreamFromStream(
-        uint32_t output_stream_id,
-        const camera2_stream_in_ops_t *reprocess_stream_ops,
-        uint32_t *stream_id) {
-    return INVALID_OPERATION;
+    uint32_t output_stream_id,
+    const camera2_stream_in_ops_t *reprocess_stream_ops, uint32_t *stream_id) {
+  return INVALID_OPERATION;
 }
 
 int EmulatedCamera2::releaseReprocessStream(uint32_t stream_id) {
-    return INVALID_OPERATION;
+  return INVALID_OPERATION;
 }
 
 /** 3A triggering */
 
-int EmulatedCamera2::triggerAction(uint32_t trigger_id,
-                                   int ext1, int ext2) {
-    return INVALID_OPERATION;
+int EmulatedCamera2::triggerAction(uint32_t trigger_id, int ext1, int ext2) {
+  return INVALID_OPERATION;
 }
 
 /** Custom tag query methods */
 
-const char* EmulatedCamera2::getVendorSectionName(uint32_t tag) {
-    return NULL;
-}
+const char *EmulatedCamera2::getVendorSectionName(uint32_t tag) { return NULL; }
 
-const char* EmulatedCamera2::getVendorTagName(uint32_t tag) {
-    return NULL;
-}
+const char *EmulatedCamera2::getVendorTagName(uint32_t tag) { return NULL; }
 
-int EmulatedCamera2::getVendorTagType(uint32_t tag) {
-    return -1;
-}
+int EmulatedCamera2::getVendorTagType(uint32_t tag) { return -1; }
 
 /** Debug methods */
 
-int EmulatedCamera2::dump(int fd) {
-    return INVALID_OPERATION;
-}
+int EmulatedCamera2::dump(int fd) { return INVALID_OPERATION; }
 
 /****************************************************************************
  * Private API.
@@ -214,178 +179,163 @@
  * 'camera_device2' parameter, or set a member value in the same.
  ***************************************************************************/
 
-EmulatedCamera2* getInstance(const camera2_device_t *d) {
-    const EmulatedCamera2* cec = static_cast<const EmulatedCamera2*>(d);
-    return const_cast<EmulatedCamera2*>(cec);
+EmulatedCamera2 *getInstance(const camera2_device_t *d) {
+  const EmulatedCamera2 *cec = static_cast<const EmulatedCamera2 *>(d);
+  return const_cast<EmulatedCamera2 *>(cec);
 }
 
-int EmulatedCamera2::set_request_queue_src_ops(const camera2_device_t *d,
-        const camera2_request_queue_src_ops *queue_src_ops) {
-    EmulatedCamera2* ec = getInstance(d);
-    ec->mRequestQueueSrc = queue_src_ops;
-    return NO_ERROR;
+int EmulatedCamera2::set_request_queue_src_ops(
+    const camera2_device_t *d,
+    const camera2_request_queue_src_ops *queue_src_ops) {
+  EmulatedCamera2 *ec = getInstance(d);
+  ec->mRequestQueueSrc = queue_src_ops;
+  return NO_ERROR;
 }
 
 int EmulatedCamera2::notify_request_queue_not_empty(const camera2_device_t *d) {
-    EmulatedCamera2* ec = getInstance(d);
-    return ec->requestQueueNotify();
+  EmulatedCamera2 *ec = getInstance(d);
+  return ec->requestQueueNotify();
 }
 
-int EmulatedCamera2::set_frame_queue_dst_ops(const camera2_device_t *d,
-        const camera2_frame_queue_dst_ops *queue_dst_ops) {
-    EmulatedCamera2* ec = getInstance(d);
-    ec->mFrameQueueDst = queue_dst_ops;
-    return NO_ERROR;
+int EmulatedCamera2::set_frame_queue_dst_ops(
+    const camera2_device_t *d,
+    const camera2_frame_queue_dst_ops *queue_dst_ops) {
+  EmulatedCamera2 *ec = getInstance(d);
+  ec->mFrameQueueDst = queue_dst_ops;
+  return NO_ERROR;
 }
 
 int EmulatedCamera2::get_in_progress_count(const camera2_device_t *d) {
-    EmulatedCamera2* ec = getInstance(d);
-    return ec->getInProgressCount();
+  EmulatedCamera2 *ec = getInstance(d);
+  return ec->getInProgressCount();
 }
 
 int EmulatedCamera2::flush_captures_in_progress(const camera2_device_t *d) {
-    EmulatedCamera2* ec = getInstance(d);
-    return ec->flushCapturesInProgress();
+  EmulatedCamera2 *ec = getInstance(d);
+  return ec->flushCapturesInProgress();
 }
 
 int EmulatedCamera2::construct_default_request(const camera2_device_t *d,
-        int request_template,
-        camera_metadata_t **request) {
-    EmulatedCamera2* ec = getInstance(d);
-    return ec->constructDefaultRequest(request_template, request);
+                                               int request_template,
+                                               camera_metadata_t **request) {
+  EmulatedCamera2 *ec = getInstance(d);
+  return ec->constructDefaultRequest(request_template, request);
 }
 
-int EmulatedCamera2::allocate_stream(const camera2_device_t *d,
-        uint32_t width,
-        uint32_t height,
-        int format,
-        const camera2_stream_ops_t *stream_ops,
-        uint32_t *stream_id,
-        uint32_t *format_actual,
-        uint32_t *usage,
-        uint32_t *max_buffers) {
-    EmulatedCamera2* ec = getInstance(d);
-    return ec->allocateStream(width, height, format, stream_ops,
-            stream_id, format_actual, usage, max_buffers);
+int EmulatedCamera2::allocate_stream(const camera2_device_t *d, uint32_t width,
+                                     uint32_t height, int format,
+                                     const camera2_stream_ops_t *stream_ops,
+                                     uint32_t *stream_id,
+                                     uint32_t *format_actual, uint32_t *usage,
+                                     uint32_t *max_buffers) {
+  EmulatedCamera2 *ec = getInstance(d);
+  return ec->allocateStream(width, height, format, stream_ops, stream_id,
+                            format_actual, usage, max_buffers);
 }
 
 int EmulatedCamera2::register_stream_buffers(const camera2_device_t *d,
-        uint32_t stream_id,
-        int num_buffers,
-        buffer_handle_t *buffers) {
-    EmulatedCamera2* ec = getInstance(d);
-    return ec->registerStreamBuffers(stream_id,
-            num_buffers,
-            buffers);
+                                             uint32_t stream_id,
+                                             int num_buffers,
+                                             buffer_handle_t *buffers) {
+  EmulatedCamera2 *ec = getInstance(d);
+  return ec->registerStreamBuffers(stream_id, num_buffers, buffers);
 }
 int EmulatedCamera2::release_stream(const camera2_device_t *d,
-        uint32_t stream_id) {
-    EmulatedCamera2* ec = getInstance(d);
-    return ec->releaseStream(stream_id);
+                                    uint32_t stream_id) {
+  EmulatedCamera2 *ec = getInstance(d);
+  return ec->releaseStream(stream_id);
 }
 
-int EmulatedCamera2::allocate_reprocess_stream(const camera2_device_t *d,
-        uint32_t width,
-        uint32_t height,
-        uint32_t format,
-        const camera2_stream_in_ops_t *reprocess_stream_ops,
-        uint32_t *stream_id,
-        uint32_t *consumer_usage,
-        uint32_t *max_buffers) {
-    EmulatedCamera2* ec = getInstance(d);
-    return ec->allocateReprocessStream(width, height, format,
-            reprocess_stream_ops, stream_id, consumer_usage, max_buffers);
+int EmulatedCamera2::allocate_reprocess_stream(
+    const camera2_device_t *d, uint32_t width, uint32_t height, uint32_t format,
+    const camera2_stream_in_ops_t *reprocess_stream_ops, uint32_t *stream_id,
+    uint32_t *consumer_usage, uint32_t *max_buffers) {
+  EmulatedCamera2 *ec = getInstance(d);
+  return ec->allocateReprocessStream(width, height, format,
+                                     reprocess_stream_ops, stream_id,
+                                     consumer_usage, max_buffers);
 }
 
 int EmulatedCamera2::allocate_reprocess_stream_from_stream(
-            const camera2_device_t *d,
-            uint32_t output_stream_id,
-            const camera2_stream_in_ops_t *reprocess_stream_ops,
-            uint32_t *stream_id) {
-    EmulatedCamera2* ec = getInstance(d);
-    return ec->allocateReprocessStreamFromStream(output_stream_id,
-            reprocess_stream_ops, stream_id);
+    const camera2_device_t *d, uint32_t output_stream_id,
+    const camera2_stream_in_ops_t *reprocess_stream_ops, uint32_t *stream_id) {
+  EmulatedCamera2 *ec = getInstance(d);
+  return ec->allocateReprocessStreamFromStream(output_stream_id,
+                                               reprocess_stream_ops, stream_id);
 }
 
-
 int EmulatedCamera2::release_reprocess_stream(const camera2_device_t *d,
-        uint32_t stream_id) {
-    EmulatedCamera2* ec = getInstance(d);
-    return ec->releaseReprocessStream(stream_id);
+                                              uint32_t stream_id) {
+  EmulatedCamera2 *ec = getInstance(d);
+  return ec->releaseReprocessStream(stream_id);
 }
 
 int EmulatedCamera2::trigger_action(const camera2_device_t *d,
-        uint32_t trigger_id,
-        int ext1,
-        int ext2) {
-    EmulatedCamera2* ec = getInstance(d);
-    return ec->triggerAction(trigger_id, ext1, ext2);
+                                    uint32_t trigger_id, int ext1, int ext2) {
+  EmulatedCamera2 *ec = getInstance(d);
+  return ec->triggerAction(trigger_id, ext1, ext2);
 }
 
 int EmulatedCamera2::set_notify_callback(const camera2_device_t *d,
-        camera2_notify_callback notify_cb, void* user) {
-    EmulatedCamera2* ec = getInstance(d);
-    Mutex::Autolock l(ec->mMutex);
-    ec->mNotifyCb = notify_cb;
-    ec->mNotifyUserPtr = user;
-    return NO_ERROR;
+                                         camera2_notify_callback notify_cb,
+                                         void *user) {
+  EmulatedCamera2 *ec = getInstance(d);
+  Mutex::Autolock l(ec->mMutex);
+  ec->mNotifyCb = notify_cb;
+  ec->mNotifyUserPtr = user;
+  return NO_ERROR;
 }
 
 int EmulatedCamera2::get_metadata_vendor_tag_ops(const camera2_device_t *d,
-        vendor_tag_query_ops_t **ops) {
-    EmulatedCamera2* ec = getInstance(d);
-    *ops = static_cast<vendor_tag_query_ops_t*>(
-            &ec->mVendorTagOps);
-    return NO_ERROR;
+                                                 vendor_tag_query_ops_t **ops) {
+  EmulatedCamera2 *ec = getInstance(d);
+  *ops = static_cast<vendor_tag_query_ops_t *>(&ec->mVendorTagOps);
+  return NO_ERROR;
 }
 
-const char* EmulatedCamera2::get_camera_vendor_section_name(
-        const vendor_tag_query_ops_t *v,
-        uint32_t tag) {
-    EmulatedCamera2* ec = static_cast<const TagOps*>(v)->parent;
-    return ec->getVendorSectionName(tag);
+const char *EmulatedCamera2::get_camera_vendor_section_name(
+    const vendor_tag_query_ops_t *v, uint32_t tag) {
+  EmulatedCamera2 *ec = static_cast<const TagOps *>(v)->parent;
+  return ec->getVendorSectionName(tag);
 }
 
-const char* EmulatedCamera2::get_camera_vendor_tag_name(
-        const vendor_tag_query_ops_t *v,
-        uint32_t tag) {
-    EmulatedCamera2* ec = static_cast<const TagOps*>(v)->parent;
-    return ec->getVendorTagName(tag);
+const char *EmulatedCamera2::get_camera_vendor_tag_name(
+    const vendor_tag_query_ops_t *v, uint32_t tag) {
+  EmulatedCamera2 *ec = static_cast<const TagOps *>(v)->parent;
+  return ec->getVendorTagName(tag);
 }
 
-int EmulatedCamera2::get_camera_vendor_tag_type(
-        const vendor_tag_query_ops_t *v,
-        uint32_t tag)  {
-    EmulatedCamera2* ec = static_cast<const TagOps*>(v)->parent;
-    return ec->getVendorTagType(tag);
+int EmulatedCamera2::get_camera_vendor_tag_type(const vendor_tag_query_ops_t *v,
+                                                uint32_t tag) {
+  EmulatedCamera2 *ec = static_cast<const TagOps *>(v)->parent;
+  return ec->getVendorTagType(tag);
 }
 
 int EmulatedCamera2::dump(const camera2_device_t *d, int fd) {
-    EmulatedCamera2* ec = getInstance(d);
-    return ec->dump(fd);
+  EmulatedCamera2 *ec = getInstance(d);
+  return ec->dump(fd);
 }
 
-int EmulatedCamera2::close(struct hw_device_t* device) {
-    EmulatedCamera2* ec =
-            static_cast<EmulatedCamera2*>(
-                reinterpret_cast<camera2_device_t*>(device) );
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera2 device", __FUNCTION__);
-        return -EINVAL;
-    }
-    return ec->closeCamera();
+int EmulatedCamera2::close(struct hw_device_t *device) {
+  EmulatedCamera2 *ec = static_cast<EmulatedCamera2 *>(
+      reinterpret_cast<camera2_device_t *>(device));
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera2 device", __FUNCTION__);
+    return -EINVAL;
+  }
+  return ec->closeCamera();
 }
 
-void EmulatedCamera2::sendNotification(int32_t msgType,
-        int32_t ext1, int32_t ext2, int32_t ext3) {
-    camera2_notify_callback notifyCb;
-    {
-        Mutex::Autolock l(mMutex);
-        notifyCb = mNotifyCb;
-    }
-    if (notifyCb != NULL) {
-        notifyCb(msgType, ext1, ext2, ext3, mNotifyUserPtr);
-    }
+void EmulatedCamera2::sendNotification(int32_t msgType, int32_t ext1,
+                                       int32_t ext2, int32_t ext3) {
+  camera2_notify_callback notifyCb;
+  {
+    Mutex::Autolock l(mMutex);
+    notifyCb = mNotifyCb;
+  }
+  if (notifyCb != NULL) {
+    notifyCb(msgType, ext1, ext2, ext3, mNotifyUserPtr);
+  }
 }
 
 camera2_device_ops_t EmulatedCamera2::sDeviceOps = {
@@ -404,7 +354,6 @@
     EmulatedCamera2::trigger_action,
     EmulatedCamera2::set_notify_callback,
     EmulatedCamera2::get_metadata_vendor_tag_ops,
-    EmulatedCamera2::dump
-};
+    EmulatedCamera2::dump};
 
 }; /* namespace android */
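One case above cannot use that device-struct downcast: the vendor-tag callbacks receive only a vendor_tag_query_ops_t pointer. EmulatedCamera2 therefore embeds the ops table in a TagOps struct carrying a parent back-pointer, and get_camera_vendor_section_name and friends downcast to TagOps to reach the instance. A self-contained sketch of that idiom, with hypothetical vendor_ops_t and Owner types standing in for the real ones:

    #include <cstdio>

    // Hypothetical stand-in for vendor_tag_query_ops_t.
    struct vendor_ops_t {
      const char* (*get_section_name)(const vendor_ops_t* v, unsigned tag);
    };

    class Owner {
     public:
      // Ops table plus back-pointer, mirroring EmulatedCamera2::TagOps.
      struct TagOps : vendor_ops_t {
        Owner* parent;
      };

      Owner() {
        mTagOps.get_section_name = &Owner::get_section_name;
        mTagOps.parent = this;
      }

      const char* sectionName(unsigned /*tag*/) { return "com.example.fake"; }

      TagOps mTagOps;

     private:
      static const char* get_section_name(const vendor_ops_t* v, unsigned tag) {
        // Safe downcast: the only vendor_ops_t ever handed out is &mTagOps.
        Owner* owner = static_cast<const TagOps*>(v)->parent;
        return owner->sectionName(tag);
      }
    };

    int main() {
      Owner o;
      const vendor_ops_t* v = &o.mTagOps;  // what the framework sees
      std::printf("%s\n", v->get_section_name(v, 0));
      return 0;
    }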
diff --git a/guest/hals/camera/EmulatedCamera2.h b/guest/hals/camera/EmulatedCamera2.h
index baf5a18..143c466 100644
--- a/guest/hals/camera/EmulatedCamera2.h
+++ b/guest/hals/camera/EmulatedCamera2.h
@@ -25,11 +25,11 @@
 * for all camera API calls defined by the camera2_device_ops_t API.
  */
 
+#include <utils/Mutex.h>
+#include <utils/Thread.h>
+#include "EmulatedBaseCamera.h"
 #include "hardware/camera2.h"
 #include "system/camera_metadata.h"
-#include "EmulatedBaseCamera.h"
-#include <utils/Thread.h>
-#include <utils/Mutex.h>
 
 namespace android {
 
@@ -42,238 +42,207 @@
  * response to hw_module_methods_t::open, and camera_device::close callbacks.
  */
 class EmulatedCamera2 : public camera2_device, public EmulatedBaseCamera {
-public:
-    /* Constructs EmulatedCamera2 instance.
-     * Param:
-     *  cameraId - Zero based camera identifier, which is an index of the camera
-     *      instance in camera factory's array.
-     *  module - Emulated camera HAL module descriptor.
-     */
-    EmulatedCamera2(int cameraId,
-            struct hw_module_t* module);
+ public:
+  /* Constructs EmulatedCamera2 instance.
+   * Param:
+   *  cameraId - Zero based camera identifier, which is an index of the camera
+   *      instance in camera factory's array.
+   *  module - Emulated camera HAL module descriptor.
+   */
+  EmulatedCamera2(int cameraId, struct hw_module_t *module);
 
-    /* Destructs EmulatedCamera2 instance. */
-    virtual ~EmulatedCamera2();
+  /* Destructs EmulatedCamera2 instance. */
+  virtual ~EmulatedCamera2();
 
-    /****************************************************************************
-     * Abstract API
-     ***************************************************************************/
+  /****************************************************************************
+   * Abstract API
+   ***************************************************************************/
 
-public:
+ public:
+  /****************************************************************************
+   * Public API
+   ***************************************************************************/
 
-    /****************************************************************************
-     * Public API
-     ***************************************************************************/
+ public:
+  virtual status_t Initialize(const cvd::CameraDefinition &props);
 
-public:
-    virtual status_t Initialize(const cvd::CameraDefinition& props);
+  /****************************************************************************
+   * Camera module API and generic hardware device API implementation
+   ***************************************************************************/
 
-    /****************************************************************************
-     * Camera module API and generic hardware device API implementation
-     ***************************************************************************/
+ public:
+  virtual status_t connectCamera(hw_device_t **device);
 
-public:
-    virtual status_t connectCamera(hw_device_t** device);
+  virtual status_t closeCamera();
 
-    virtual status_t closeCamera();
+  virtual status_t getCameraInfo(struct camera_info *info) = 0;
 
-    virtual status_t getCameraInfo(struct camera_info* info) = 0;
+  virtual status_t getImageMetadata(struct ImageMetadata *meta) {
+    // TODO(ender): fill in Image metadata structure.
+    return ENOSYS;
+  }
 
-    virtual status_t getImageMetadata(struct ImageMetadata* meta) {
-        // TODO(ender): fill in Image metadata structure.
-        return ENOSYS;
-    }
+  /****************************************************************************
+   * Camera API implementation.
+   * These methods are called from the camera API callback routines.
+   ***************************************************************************/
 
-    /****************************************************************************
-     * Camera API implementation.
-     * These methods are called from the camera API callback routines.
-     ***************************************************************************/
+ protected:
+  /** Request input queue notification */
+  virtual int requestQueueNotify();
 
-protected:
-    /** Request input queue notification */
-    virtual int requestQueueNotify();
+  /** Count of requests in flight */
+  virtual int getInProgressCount();
 
-    /** Count of requests in flight */
-    virtual int getInProgressCount();
+  /** Cancel all captures in flight */
+  virtual int flushCapturesInProgress();
 
-    /** Cancel all captures in flight */
-    virtual int flushCapturesInProgress();
+  virtual int constructDefaultRequest(int request_template,
+                                      camera_metadata_t **request);
 
-    virtual int constructDefaultRequest(
-        int request_template,
-        camera_metadata_t **request);
+  /** Output stream creation and management */
+  virtual int allocateStream(uint32_t width, uint32_t height, int format,
+                             const camera2_stream_ops_t *stream_ops,
+                             uint32_t *stream_id, uint32_t *format_actual,
+                             uint32_t *usage, uint32_t *max_buffers);
 
-    /** Output stream creation and management */
-    virtual int allocateStream(
-            uint32_t width,
-            uint32_t height,
-            int format,
-            const camera2_stream_ops_t *stream_ops,
-            uint32_t *stream_id,
-            uint32_t *format_actual,
-            uint32_t *usage,
-            uint32_t *max_buffers);
+  virtual int registerStreamBuffers(uint32_t stream_id, int num_buffers,
+                                    buffer_handle_t *buffers);
 
-    virtual int registerStreamBuffers(
-            uint32_t stream_id,
-            int num_buffers,
-            buffer_handle_t *buffers);
+  virtual int releaseStream(uint32_t stream_id);
 
-    virtual int releaseStream(uint32_t stream_id);
+  /** Input stream creation and management */
+  virtual int allocateReprocessStream(
+      uint32_t width, uint32_t height, uint32_t format,
+      const camera2_stream_in_ops_t *reprocess_stream_ops, uint32_t *stream_id,
+      uint32_t *consumer_usage, uint32_t *max_buffers);
 
-    /** Input stream creation and management */
-    virtual int allocateReprocessStream(
-            uint32_t width,
-            uint32_t height,
-            uint32_t format,
-            const camera2_stream_in_ops_t *reprocess_stream_ops,
-            uint32_t *stream_id,
-            uint32_t *consumer_usage,
-            uint32_t *max_buffers);
+  virtual int allocateReprocessStreamFromStream(
+      uint32_t output_stream_id,
+      const camera2_stream_in_ops_t *reprocess_stream_ops, uint32_t *stream_id);
 
-    virtual int allocateReprocessStreamFromStream(
-            uint32_t output_stream_id,
-            const camera2_stream_in_ops_t *reprocess_stream_ops,
-            uint32_t *stream_id);
+  virtual int releaseReprocessStream(uint32_t stream_id);
 
-    virtual int releaseReprocessStream(uint32_t stream_id);
+  /** 3A action triggering */
+  virtual int triggerAction(uint32_t trigger_id, int32_t ext1, int32_t ext2);
 
-    /** 3A action triggering */
-    virtual int triggerAction(uint32_t trigger_id,
-            int32_t ext1, int32_t ext2);
+  /** Custom tag definitions */
+  virtual const char *getVendorSectionName(uint32_t tag);
+  virtual const char *getVendorTagName(uint32_t tag);
+  virtual int getVendorTagType(uint32_t tag);
 
-    /** Custom tag definitions */
-    virtual const char* getVendorSectionName(uint32_t tag);
-    virtual const char* getVendorTagName(uint32_t tag);
-    virtual int         getVendorTagType(uint32_t tag);
+  /** Debug methods */
 
-    /** Debug methods */
+  virtual int dump(int fd);
 
-    virtual int dump(int fd);
+  /****************************************************************************
+   * Camera API callbacks as defined by camera2_device_ops structure.  See
+   * hardware/libhardware/include/hardware/camera2.h for information on each
+   * of these callbacks. Implemented in this class, these callbacks simply
+   * dispatch the call into an instance of EmulatedCamera2 class defined in
+   * the 'camera_device2' parameter.
+   ***************************************************************************/
 
-    /****************************************************************************
-     * Camera API callbacks as defined by camera2_device_ops structure.  See
-     * hardware/libhardware/include/hardware/camera2.h for information on each
-     * of these callbacks. Implemented in this class, these callbacks simply
-     * dispatch the call into an instance of EmulatedCamera2 class defined in
-     * the 'camera_device2' parameter.
-     ***************************************************************************/
+ private:
+  /** Input request queue */
+  static int set_request_queue_src_ops(
+      const camera2_device_t *,
+      const camera2_request_queue_src_ops *queue_src_ops);
+  static int notify_request_queue_not_empty(const camera2_device_t *);
 
-private:
-    /** Input request queue */
-    static int set_request_queue_src_ops(const camera2_device_t *,
-            const camera2_request_queue_src_ops *queue_src_ops);
-    static int notify_request_queue_not_empty(const camera2_device_t *);
+  /** Output frame queue */
+  static int set_frame_queue_dst_ops(
+      const camera2_device_t *,
+      const camera2_frame_queue_dst_ops *queue_dst_ops);
 
-    /** Output frame queue */
-    static int set_frame_queue_dst_ops(const camera2_device_t *,
-            const camera2_frame_queue_dst_ops *queue_dst_ops);
+  /** In-progress request management */
+  static int get_in_progress_count(const camera2_device_t *);
 
-    /** In-progress request management */
-    static int get_in_progress_count(const camera2_device_t *);
+  static int flush_captures_in_progress(const camera2_device_t *);
 
-    static int flush_captures_in_progress(const camera2_device_t *);
+  /** Request template creation */
+  static int construct_default_request(const camera2_device_t *,
+                                       int request_template,
+                                       camera_metadata_t **request);
 
-    /** Request template creation */
-    static int construct_default_request(const camera2_device_t *,
-            int request_template,
-            camera_metadata_t **request);
+  /** Stream management */
+  static int allocate_stream(const camera2_device_t *, uint32_t width,
+                             uint32_t height, int format,
+                             const camera2_stream_ops_t *stream_ops,
+                             uint32_t *stream_id, uint32_t *format_actual,
+                             uint32_t *usage, uint32_t *max_buffers);
 
-    /** Stream management */
-    static int allocate_stream(const camera2_device_t *,
-            uint32_t width,
-            uint32_t height,
-            int format,
-            const camera2_stream_ops_t *stream_ops,
-            uint32_t *stream_id,
-            uint32_t *format_actual,
-            uint32_t *usage,
-            uint32_t *max_buffers);
+  static int register_stream_buffers(const camera2_device_t *,
+                                     uint32_t stream_id, int num_buffers,
+                                     buffer_handle_t *buffers);
 
-    static int register_stream_buffers(const camera2_device_t *,
-            uint32_t stream_id,
-            int num_buffers,
-            buffer_handle_t *buffers);
+  static int release_stream(const camera2_device_t *, uint32_t stream_id);
 
-    static int release_stream(const camera2_device_t *,
-            uint32_t stream_id);
+  static int allocate_reprocess_stream(
+      const camera2_device_t *, uint32_t width, uint32_t height,
+      uint32_t format, const camera2_stream_in_ops_t *reprocess_stream_ops,
+      uint32_t *stream_id, uint32_t *consumer_usage, uint32_t *max_buffers);
 
-    static int allocate_reprocess_stream(const camera2_device_t *,
-            uint32_t width,
-            uint32_t height,
-            uint32_t format,
-            const camera2_stream_in_ops_t *reprocess_stream_ops,
-            uint32_t *stream_id,
-            uint32_t *consumer_usage,
-            uint32_t *max_buffers);
+  static int allocate_reprocess_stream_from_stream(
+      const camera2_device_t *, uint32_t output_stream_id,
+      const camera2_stream_in_ops_t *reprocess_stream_ops, uint32_t *stream_id);
 
-    static int allocate_reprocess_stream_from_stream(const camera2_device_t *,
-            uint32_t output_stream_id,
-            const camera2_stream_in_ops_t *reprocess_stream_ops,
-            uint32_t *stream_id);
+  static int release_reprocess_stream(const camera2_device_t *,
+                                      uint32_t stream_id);
 
-    static int release_reprocess_stream(const camera2_device_t *,
-            uint32_t stream_id);
+  /** 3A triggers */
+  static int trigger_action(const camera2_device_t *, uint32_t trigger_id,
+                            int ext1, int ext2);
 
-    /** 3A triggers*/
-    static int trigger_action(const camera2_device_t *,
-            uint32_t trigger_id,
-            int ext1,
-            int ext2);
+  /** Notifications to application */
+  static int set_notify_callback(const camera2_device_t *,
+                                 camera2_notify_callback notify_cb, void *user);
 
-    /** Notifications to application */
-    static int set_notify_callback(const camera2_device_t *,
-            camera2_notify_callback notify_cb,
-            void *user);
+  /** Vendor metadata registration */
+  static int get_metadata_vendor_tag_ops(const camera2_device_t *,
+                                         vendor_tag_query_ops_t **ops);
+  // for get_metadata_vendor_tag_ops
+  static const char *get_camera_vendor_section_name(
+      const vendor_tag_query_ops_t *, uint32_t tag);
+  static const char *get_camera_vendor_tag_name(const vendor_tag_query_ops_t *,
+                                                uint32_t tag);
+  static int get_camera_vendor_tag_type(const vendor_tag_query_ops_t *,
+                                        uint32_t tag);
 
-    /** Vendor metadata registration */
-    static int get_metadata_vendor_tag_ops(const camera2_device_t *,
-            vendor_tag_query_ops_t **ops);
-    // for get_metadata_vendor_tag_ops
-    static const char* get_camera_vendor_section_name(
-            const vendor_tag_query_ops_t *,
-            uint32_t tag);
-    static const char* get_camera_vendor_tag_name(
-            const vendor_tag_query_ops_t *,
-            uint32_t tag);
-    static int get_camera_vendor_tag_type(
-            const vendor_tag_query_ops_t *,
-            uint32_t tag);
+  static int dump(const camera2_device_t *, int fd);
 
-    static int dump(const camera2_device_t *, int fd);
+  /** For hw_device_t ops */
+  static int close(struct hw_device_t *device);
 
-    /** For hw_device_t ops */
-    static int close(struct hw_device_t* device);
+  /****************************************************************************
+   * Data members shared with implementations
+   ***************************************************************************/
+ protected:
+  /** Mutex for calls through camera2 device interface */
+  Mutex mMutex;
 
-    /****************************************************************************
-     * Data members shared with implementations
-     ***************************************************************************/
-  protected:
-    /** Mutex for calls through camera2 device interface */
-    Mutex mMutex;
+  bool mStatusPresent;
 
-    bool mStatusPresent;
+  const camera2_request_queue_src_ops *mRequestQueueSrc;
+  const camera2_frame_queue_dst_ops *mFrameQueueDst;
 
-    const camera2_request_queue_src_ops *mRequestQueueSrc;
-    const camera2_frame_queue_dst_ops *mFrameQueueDst;
+  struct TagOps : public vendor_tag_query_ops {
+    EmulatedCamera2 *parent;
+  };
+  TagOps mVendorTagOps;
 
-    struct TagOps : public vendor_tag_query_ops {
-        EmulatedCamera2 *parent;
-    };
-    TagOps      mVendorTagOps;
+  void sendNotification(int32_t msgType, int32_t ext1, int32_t ext2,
+                        int32_t ext3);
 
-    void sendNotification(int32_t msgType,
-            int32_t ext1, int32_t ext2, int32_t ext3);
-
-    /****************************************************************************
-     * Data members
-     ***************************************************************************/
-  private:
-    static camera2_device_ops_t sDeviceOps;
-    camera2_notify_callback mNotifyCb;
-    void* mNotifyUserPtr;
+  /****************************************************************************
+   * Data members
+   ***************************************************************************/
+ private:
+  static camera2_device_ops_t sDeviceOps;
+  camera2_notify_callback mNotifyCb;
+  void *mNotifyUserPtr;
 };
 
 }; /* namespace android */
 
-#endif  /* HW_EMULATOR_CAMERA_EMULATED_CAMERA2_H */
+#endif /* HW_EMULATOR_CAMERA_EMULATED_CAMERA2_H */
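sendNotification(), defined in EmulatedCamera2.cpp above, copies mNotifyCb while holding mMutex and invokes the callback only after the lock is released, so a callback that re-enters the HAL cannot deadlock on the same mutex. A sketch of that copy-then-call pattern, assuming std::mutex in place of android::Mutex and a hypothetical Notifier class (this version also copies the user pointer under the lock):

    #include <cstdio>
    #include <mutex>

    class Notifier {
     public:
      using Callback = void (*)(int msg, void* user);

      void setCallback(Callback cb, void* user) {
        std::lock_guard<std::mutex> lock(mMutex);
        mCb = cb;
        mUser = user;
      }

      void send(int msg) {
        Callback cb;
        void* user;
        {
          // Hold the lock only long enough to copy the callback state.
          std::lock_guard<std::mutex> lock(mMutex);
          cb = mCb;
          user = mUser;
        }
        if (cb != nullptr) {
          cb(msg, user);  // invoked with the lock released
        }
      }

     private:
      std::mutex mMutex;
      Callback mCb = nullptr;
      void* mUser = nullptr;
    };

    int main() {
      Notifier n;
      n.setCallback([](int msg, void*) { std::printf("msg %d\n", msg); },
                    nullptr);
      n.send(42);
      return 0;
    }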
diff --git a/guest/hals/camera/EmulatedCamera3.cpp b/guest/hals/camera/EmulatedCamera3.cpp
index c9e48a5..0bc9571 100644
--- a/guest/hals/camera/EmulatedCamera3.cpp
+++ b/guest/hals/camera/EmulatedCamera3.cpp
@@ -38,24 +38,18 @@
  *      instance in camera factory's array.
  *  module - Emulated camera HAL module descriptor.
  */
-EmulatedCamera3::EmulatedCamera3(int cameraId,
-        struct hw_module_t* module):
-        EmulatedBaseCamera(cameraId,
-                CAMERA_DEVICE_API_VERSION_3_3,
-                &common,
-                module),
-        mStatus(STATUS_ERROR)
-{
-    common.close = EmulatedCamera3::close;
-    ops = &sDeviceOps;
+EmulatedCamera3::EmulatedCamera3(int cameraId, struct hw_module_t* module)
+    : EmulatedBaseCamera(cameraId, CAMERA_DEVICE_API_VERSION_3_3, &common,
+                         module),
+      mStatus(STATUS_ERROR) {
+  common.close = EmulatedCamera3::close;
+  ops = &sDeviceOps;
 
-    mCallbackOps = NULL;
-
+  mCallbackOps = NULL;
 }
 
 /* Destructs EmulatedCamera3 instance. */
-EmulatedCamera3::~EmulatedCamera3() {
-}
+EmulatedCamera3::~EmulatedCamera3() {}
 
 /****************************************************************************
  * Abstract API
@@ -66,10 +60,10 @@
  ***************************************************************************/
 
 status_t EmulatedCamera3::Initialize(const cvd::CameraDefinition& params) {
-    ALOGV("%s", __FUNCTION__);
+  ALOGV("%s", __FUNCTION__);
 
-    mStatus = STATUS_CLOSED;
-    return NO_ERROR;
+  mStatus = STATUS_CLOSED;
+  return NO_ERROR;
 }
 
 /****************************************************************************
@@ -77,27 +71,26 @@
  ***************************************************************************/
 
 status_t EmulatedCamera3::connectCamera(hw_device_t** device) {
-    ALOGV("%s", __FUNCTION__);
-    if (device == NULL) return BAD_VALUE;
+  ALOGV("%s", __FUNCTION__);
+  if (device == NULL) return BAD_VALUE;
 
-    if (mStatus != STATUS_CLOSED) {
-        ALOGE("%s: Trying to open a camera in state %d!",
-                __FUNCTION__, mStatus);
-        return INVALID_OPERATION;
-    }
+  if (mStatus != STATUS_CLOSED) {
+    ALOGE("%s: Trying to open a camera in state %d!", __FUNCTION__, mStatus);
+    return INVALID_OPERATION;
+  }
 
-    *device = &common;
-    mStatus = STATUS_OPEN;
-    return NO_ERROR;
+  *device = &common;
+  mStatus = STATUS_OPEN;
+  return NO_ERROR;
 }
 
 status_t EmulatedCamera3::closeCamera() {
-    mStatus = STATUS_CLOSED;
-    return NO_ERROR;
+  mStatus = STATUS_CLOSED;
+  return NO_ERROR;
 }
 
 status_t EmulatedCamera3::getCameraInfo(struct camera_info* info) {
-    return EmulatedBaseCamera::getCameraInfo(info);
+  return EmulatedBaseCamera::getCameraInfo(info);
 }
 
 /****************************************************************************
@@ -106,71 +99,70 @@
  ***************************************************************************/
 
 status_t EmulatedCamera3::initializeDevice(
-        const camera3_callback_ops *callbackOps) {
-    if (callbackOps == NULL) {
-        ALOGE("%s: NULL callback ops provided to HAL!",
-                __FUNCTION__);
-        return BAD_VALUE;
-    }
+    const camera3_callback_ops* callbackOps) {
+  if (callbackOps == NULL) {
+    ALOGE("%s: NULL callback ops provided to HAL!", __FUNCTION__);
+    return BAD_VALUE;
+  }
 
-    if (mStatus != STATUS_OPEN) {
-        ALOGE("%s: Trying to initialize a camera in state %d!",
-                __FUNCTION__, mStatus);
-        return INVALID_OPERATION;
-    }
+  if (mStatus != STATUS_OPEN) {
+    ALOGE("%s: Trying to initialize a camera in state %d!", __FUNCTION__,
+          mStatus);
+    return INVALID_OPERATION;
+  }
 
-    mCallbackOps = callbackOps;
-    mStatus = STATUS_READY;
+  mCallbackOps = callbackOps;
+  mStatus = STATUS_READY;
 
-    return NO_ERROR;
+  return NO_ERROR;
 }
 
 status_t EmulatedCamera3::configureStreams(
-        camera3_stream_configuration *streamList) {
-    ALOGE("%s: Not implemented", __FUNCTION__);
-    return INVALID_OPERATION;
+    camera3_stream_configuration* streamList) {
+  ALOGE("%s: Not implemented", __FUNCTION__);
+  return INVALID_OPERATION;
 }
 
 status_t EmulatedCamera3::registerStreamBuffers(
-        const camera3_stream_buffer_set *bufferSet) {
-    ALOGE("%s: Not implemented", __FUNCTION__);
-    return INVALID_OPERATION;
+    const camera3_stream_buffer_set* bufferSet) {
+  ALOGE("%s: Not implemented", __FUNCTION__);
+  return INVALID_OPERATION;
 }
 
 const camera_metadata_t* EmulatedCamera3::constructDefaultRequestSettings(
-        int type) {
-    ALOGE("%s: Not implemented", __FUNCTION__);
-    return NULL;
+    int type) {
+  ALOGE("%s: Not implemented", __FUNCTION__);
+  return NULL;
 }
 
 status_t EmulatedCamera3::processCaptureRequest(
-        camera3_capture_request *request) {
-    ALOGE("%s: Not implemented", __FUNCTION__);
-    return INVALID_OPERATION;
+    camera3_capture_request* request) {
+  ALOGE("%s: Not implemented", __FUNCTION__);
+  return INVALID_OPERATION;
 }
 
 status_t EmulatedCamera3::flush() {
-    ALOGE("%s: Not implemented", __FUNCTION__);
-    return INVALID_OPERATION;
+  ALOGE("%s: Not implemented", __FUNCTION__);
+  return INVALID_OPERATION;
 }
 
 /** Debug methods */
 
 void EmulatedCamera3::dump(int fd) {
-    ALOGE("%s: Not implemented", __FUNCTION__);
-    return;
+  ALOGE("%s: Not implemented", __FUNCTION__);
+  return;
 }
 
 /****************************************************************************
  * Protected API. Callbacks to the framework.
  ***************************************************************************/
 
-void EmulatedCamera3::sendCaptureResult(camera3_capture_result_t *result) {
-    mCallbackOps->process_capture_result(mCallbackOps, result);
+void EmulatedCamera3::sendCaptureResult(camera3_capture_result_t* result) {
+  mCallbackOps->process_capture_result(mCallbackOps, result);
 }
 
-void EmulatedCamera3::sendNotify(camera3_notify_msg_t *msg) {
-    mCallbackOps->notify(mCallbackOps, msg);
+void EmulatedCamera3::sendNotify(camera3_notify_msg_t* msg) {
+  mCallbackOps->notify(mCallbackOps, msg);
 }
 
 /****************************************************************************
@@ -185,62 +177,61 @@
  * 'camera_device3' parameter, or set a member value in the same.
  ***************************************************************************/
 
-EmulatedCamera3* getInstance(const camera3_device_t *d) {
-    const EmulatedCamera3* cec = static_cast<const EmulatedCamera3*>(d);
-    return const_cast<EmulatedCamera3*>(cec);
+EmulatedCamera3* getInstance(const camera3_device_t* d) {
+  const EmulatedCamera3* cec = static_cast<const EmulatedCamera3*>(d);
+  return const_cast<EmulatedCamera3*>(cec);
 }
 
-int EmulatedCamera3::initialize(const struct camera3_device *d,
-        const camera3_callback_ops_t *callback_ops) {
-    EmulatedCamera3* ec = getInstance(d);
-    return ec->initializeDevice(callback_ops);
+int EmulatedCamera3::initialize(const struct camera3_device* d,
+                                const camera3_callback_ops_t* callback_ops) {
+  EmulatedCamera3* ec = getInstance(d);
+  return ec->initializeDevice(callback_ops);
 }
 
-int EmulatedCamera3::configure_streams(const struct camera3_device *d,
-        camera3_stream_configuration_t *stream_list) {
-    EmulatedCamera3* ec = getInstance(d);
-    return ec->configureStreams(stream_list);
+int EmulatedCamera3::configure_streams(
+    const struct camera3_device* d,
+    camera3_stream_configuration_t* stream_list) {
+  EmulatedCamera3* ec = getInstance(d);
+  return ec->configureStreams(stream_list);
 }
 
 int EmulatedCamera3::register_stream_buffers(
-        const struct camera3_device *d,
-        const camera3_stream_buffer_set_t *buffer_set) {
-    EmulatedCamera3* ec = getInstance(d);
-    return ec->registerStreamBuffers(buffer_set);
+    const struct camera3_device* d,
+    const camera3_stream_buffer_set_t* buffer_set) {
+  EmulatedCamera3* ec = getInstance(d);
+  return ec->registerStreamBuffers(buffer_set);
 }
 
 int EmulatedCamera3::process_capture_request(
-        const struct camera3_device *d,
-        camera3_capture_request_t *request) {
-    EmulatedCamera3* ec = getInstance(d);
-    return ec->processCaptureRequest(request);
+    const struct camera3_device* d, camera3_capture_request_t* request) {
+  EmulatedCamera3* ec = getInstance(d);
+  return ec->processCaptureRequest(request);
 }
 
 const camera_metadata_t* EmulatedCamera3::construct_default_request_settings(
-        const camera3_device_t *d, int type) {
-    EmulatedCamera3* ec = getInstance(d);
-    return ec->constructDefaultRequestSettings(type);
+    const camera3_device_t* d, int type) {
+  EmulatedCamera3* ec = getInstance(d);
+  return ec->constructDefaultRequestSettings(type);
 }
 
-void EmulatedCamera3::dump(const camera3_device_t *d, int fd) {
-    EmulatedCamera3* ec = getInstance(d);
-    ec->dump(fd);
+void EmulatedCamera3::dump(const camera3_device_t* d, int fd) {
+  EmulatedCamera3* ec = getInstance(d);
+  ec->dump(fd);
 }
 
-int EmulatedCamera3::flush(const camera3_device_t *d) {
-    EmulatedCamera3* ec = getInstance(d);
-    return ec->flush();
+int EmulatedCamera3::flush(const camera3_device_t* d) {
+  EmulatedCamera3* ec = getInstance(d);
+  return ec->flush();
 }
 
 int EmulatedCamera3::close(struct hw_device_t* device) {
-    EmulatedCamera3* ec =
-            static_cast<EmulatedCamera3*>(
-                reinterpret_cast<camera3_device_t*>(device) );
-    if (ec == NULL) {
-        ALOGE("%s: Unexpected NULL camera3 device", __FUNCTION__);
-        return BAD_VALUE;
-    }
-    return ec->closeCamera();
+  EmulatedCamera3* ec = static_cast<EmulatedCamera3*>(
+      reinterpret_cast<camera3_device_t*>(device));
+  if (ec == NULL) {
+    ALOGE("%s: Unexpected NULL camera3 device", __FUNCTION__);
+    return BAD_VALUE;
+  }
+  return ec->closeCamera();
 }
 
 camera3_device_ops_t EmulatedCamera3::sDeviceOps = {
@@ -251,8 +242,7 @@
     EmulatedCamera3::process_capture_request,
     /* DEPRECATED: get_metadata_vendor_tag_ops */ nullptr,
     EmulatedCamera3::dump,
-    EmulatedCamera3::flush
-};
+    EmulatedCamera3::flush};
 
 const char* EmulatedCamera3::sAvailableCapabilitiesStrings[NUM_CAPABILITIES] = {
     "BACKWARD_COMPATIBLE",
@@ -265,7 +255,6 @@
     "YUV_REPROCESSING",
     "DEPTH_OUTPUT",
     "CONSTRAINED_HIGH_SPEED_VIDEO",
-    "FULL_LEVEL"
-};
+    "FULL_LEVEL"};
 
 }; /* namespace android */
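EmulatedCamera3 gates its entry points on the mStatus lifecycle: STATUS_ERROR at construction, STATUS_CLOSED after Initialize(), STATUS_OPEN after connectCamera(), STATUS_READY after initializeDevice(), with out-of-order calls rejected as INVALID_OPERATION. A reduced sketch of those guarded transitions, under hypothetical Device/Status names:

    #include <cstdio>

    class Device {
     public:
      enum Status { ERROR = 0, CLOSED, OPEN, READY };

      int initialize() {        // cf. Initialize(): startup-time init
        mStatus = CLOSED;
        return 0;
      }

      int open() {              // cf. connectCamera()
        if (mStatus != CLOSED) return -1;  // INVALID_OPERATION
        mStatus = OPEN;
        return 0;
      }

      int initializeDevice() {  // cf. initializeDevice(): callbacks provided
        if (mStatus != OPEN) return -1;
        mStatus = READY;
        return 0;
      }

      int close() {             // cf. closeCamera()
        mStatus = CLOSED;
        return 0;
      }

     private:
      Status mStatus = ERROR;   // state at construction time
    };

    int main() {
      Device d;
      d.initialize();
      int first = d.open();
      int second = d.open();
      std::printf("first open: %d, second open: %d\n", first, second);  // 0, -1
      return 0;
    }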
diff --git a/guest/hals/camera/EmulatedCamera3.h b/guest/hals/camera/EmulatedCamera3.h
index e7a2ac1..55e60c7 100644
--- a/guest/hals/camera/EmulatedCamera3.h
+++ b/guest/hals/camera/EmulatedCamera3.h
@@ -25,9 +25,9 @@
 * for all camera API calls defined by the camera3_device_ops_t API.
  */
 
+#include "EmulatedBaseCamera.h"
 #include "hardware/camera3.h"
 #include "system/camera_metadata.h"
-#include "EmulatedBaseCamera.h"
 
 namespace android {
 
@@ -41,168 +41,161 @@
  * response to hw_module_methods_t::open, and camera_device::close callbacks.
  */
 class EmulatedCamera3 : public camera3_device, public EmulatedBaseCamera {
-public:
-    /* Constructs EmulatedCamera3 instance.
-     * Param:
-     *  cameraId - Zero based camera identifier, which is an index of the camera
-     *      instance in camera factory's array.
-     *  module - Emulated camera HAL module descriptor.
-     */
-    EmulatedCamera3(int cameraId,
-            struct hw_module_t* module);
+ public:
+  /* Constructs EmulatedCamera3 instance.
+   * Param:
+   *  cameraId - Zero based camera identifier, which is an index of the camera
+   *      instance in camera factory's array.
+   *  module - Emulated camera HAL module descriptor.
+   */
+  EmulatedCamera3(int cameraId, struct hw_module_t *module);
 
-    /* Destructs EmulatedCamera2 instance. */
-    virtual ~EmulatedCamera3();
+  /* Destructs EmulatedCamera3 instance. */
+  virtual ~EmulatedCamera3();
 
-    /* List of all defined capabilities plus useful HW levels */
-    enum AvailableCapabilities {
-        BACKWARD_COMPATIBLE,
-        MANUAL_SENSOR,
-        MANUAL_POST_PROCESSING,
-        RAW,
-        PRIVATE_REPROCESSING,
-        READ_SENSOR_SETTINGS,
-        BURST_CAPTURE,
-        YUV_REPROCESSING,
-        DEPTH_OUTPUT,
-        CONSTRAINED_HIGH_SPEED_VIDEO,
-        // Levels
-        FULL_LEVEL,
+  /* List of all defined capabilities plus useful HW levels */
+  enum AvailableCapabilities {
+    BACKWARD_COMPATIBLE,
+    MANUAL_SENSOR,
+    MANUAL_POST_PROCESSING,
+    RAW,
+    PRIVATE_REPROCESSING,
+    READ_SENSOR_SETTINGS,
+    BURST_CAPTURE,
+    YUV_REPROCESSING,
+    DEPTH_OUTPUT,
+    CONSTRAINED_HIGH_SPEED_VIDEO,
+    // Levels
+    FULL_LEVEL,
 
-        NUM_CAPABILITIES
-    };
+    NUM_CAPABILITIES
+  };
 
-    // Char strings for above enum, with size NUM_CAPABILITIES
-    static const char *sAvailableCapabilitiesStrings[];
+  // Char strings for above enum, with size NUM_CAPABILITIES
+  static const char *sAvailableCapabilitiesStrings[];
 
-    /****************************************************************************
-     * Abstract API
-     ***************************************************************************/
+  /****************************************************************************
+   * Abstract API
+   ***************************************************************************/
 
-public:
+ public:
+  /****************************************************************************
+   * Public API
+   ***************************************************************************/
 
-    /****************************************************************************
-     * Public API
-     ***************************************************************************/
+ public:
+  virtual status_t Initialize(const cvd::CameraDefinition &params);
 
-public:
-    virtual status_t Initialize(const cvd::CameraDefinition& params);
+  /****************************************************************************
+   * Camera module API and generic hardware device API implementation
+   ***************************************************************************/
 
-    /****************************************************************************
-     * Camera module API and generic hardware device API implementation
-     ***************************************************************************/
+ public:
+  virtual status_t connectCamera(hw_device_t **device);
 
-public:
-    virtual status_t connectCamera(hw_device_t** device);
+  virtual status_t closeCamera();
 
-    virtual status_t closeCamera();
+  virtual status_t getCameraInfo(struct camera_info *info);
 
-    virtual status_t getCameraInfo(struct camera_info* info);
+  virtual status_t getImageMetadata(struct ImageMetadata *meta) {
+    // TODO(ender): fill in Image metadata structure.
+    return ENOSYS;
+  }
 
-    virtual status_t getImageMetadata(struct ImageMetadata* meta) {
-        // TODO(ender): fill in Image metadata structure.
-        return ENOSYS;
-    }
+  /****************************************************************************
+   * Camera API implementation.
+   * These methods are called from the camera API callback routines.
+   ***************************************************************************/
 
-    /****************************************************************************
-     * Camera API implementation.
-     * These methods are called from the camera API callback routines.
-     ***************************************************************************/
+ protected:
+  virtual status_t initializeDevice(const camera3_callback_ops *callbackOps);
 
-protected:
+  virtual status_t configureStreams(camera3_stream_configuration *streamList);
 
-    virtual status_t initializeDevice(
-        const camera3_callback_ops *callbackOps);
+  virtual status_t registerStreamBuffers(
+      const camera3_stream_buffer_set *bufferSet);
 
-    virtual status_t configureStreams(
-        camera3_stream_configuration *streamList);
+  virtual const camera_metadata_t *constructDefaultRequestSettings(int type);
 
-    virtual status_t registerStreamBuffers(
-        const camera3_stream_buffer_set *bufferSet) ;
+  virtual status_t processCaptureRequest(camera3_capture_request *request);
 
-    virtual const camera_metadata_t* constructDefaultRequestSettings(
-        int type);
+  virtual status_t flush();
 
-    virtual status_t processCaptureRequest(camera3_capture_request *request);
+  /** Debug methods */
 
-    virtual status_t flush();
+  virtual void dump(int fd);
 
-    /** Debug methods */
+  /****************************************************************************
+   * Camera API callbacks as defined by camera3_device_ops structure.  See
+   * hardware/libhardware/include/hardware/camera3.h for information on each
+   * of these callbacks. Implemented in this class, these callbacks simply
+   * dispatch the call into an instance of EmulatedCamera3 class defined in
+   * the 'camera_device3' parameter.
+   ***************************************************************************/
 
-    virtual void dump(int fd);
+ private:
+  /** Startup */
+  static int initialize(const struct camera3_device *,
+                        const camera3_callback_ops_t *callback_ops);
 
-    /****************************************************************************
-     * Camera API callbacks as defined by camera3_device_ops structure.  See
-     * hardware/libhardware/include/hardware/camera3.h for information on each
-     * of these callbacks. Implemented in this class, these callbacks simply
-     * dispatch the call into an instance of EmulatedCamera3 class defined in
-     * the 'camera_device3' parameter.
-     ***************************************************************************/
+  /** Stream configuration and buffer registration */
 
-private:
+  static int configure_streams(const struct camera3_device *,
+                               camera3_stream_configuration_t *stream_list);
 
-    /** Startup */
-    static int initialize(const struct camera3_device *,
-            const camera3_callback_ops_t *callback_ops);
+  static int register_stream_buffers(
+      const struct camera3_device *,
+      const camera3_stream_buffer_set_t *buffer_set);
 
-    /** Stream configuration and buffer registration */
+  /** Template request settings provision */
 
-    static int configure_streams(const struct camera3_device *,
-            camera3_stream_configuration_t *stream_list);
+  static const camera_metadata_t *construct_default_request_settings(
+      const struct camera3_device *, int type);
 
-    static int register_stream_buffers(const struct camera3_device *,
-            const camera3_stream_buffer_set_t *buffer_set);
+  /** Submission of capture requests to HAL */
 
-    /** Template request settings provision */
+  static int process_capture_request(const struct camera3_device *,
+                                     camera3_capture_request_t *request);
 
-    static const camera_metadata_t* construct_default_request_settings(
-            const struct camera3_device *, int type);
+  static void dump(const camera3_device_t *, int fd);
 
-    /** Submission of capture requests to HAL */
+  static int flush(const camera3_device_t *);
 
-    static int process_capture_request(const struct camera3_device *,
-            camera3_capture_request_t *request);
+  /** For hw_device_t ops */
+  static int close(struct hw_device_t *device);
 
-    static void dump(const camera3_device_t *, int fd);
+  /****************************************************************************
+   * Data members shared with implementations
+   ***************************************************************************/
+ protected:
+  enum {
+    // State at construction time, and after a device operation error
+    STATUS_ERROR = 0,
+    // State after startup-time init and after device instance close
+    STATUS_CLOSED,
+    // State after being opened, before device instance init
+    STATUS_OPEN,
+    // State after device instance initialization
+    STATUS_READY,
+    // State while actively capturing data
+    STATUS_ACTIVE
+  } mStatus;
 
-    static int flush(const camera3_device_t *);
+  /**
+   * Callbacks back to the framework
+   */
 
-    /** For hw_device_t ops */
-    static int close(struct hw_device_t* device);
+  void sendCaptureResult(camera3_capture_result_t *result);
+  void sendNotify(camera3_notify_msg_t *msg);
 
-    /****************************************************************************
-     * Data members shared with implementations
-     ***************************************************************************/
-  protected:
-
-    enum {
-        // State at construction time, and after a device operation error
-        STATUS_ERROR = 0,
-        // State after startup-time init and after device instance close
-        STATUS_CLOSED,
-        // State after being opened, before device instance init
-        STATUS_OPEN,
-        // State after device instance initialization
-        STATUS_READY,
-        // State while actively capturing data
-        STATUS_ACTIVE
-    } mStatus;
-
-    /**
-     * Callbacks back to the framework
-     */
-
-    void sendCaptureResult(camera3_capture_result_t *result);
-    void sendNotify(camera3_notify_msg_t *msg);
-
-    /****************************************************************************
-     * Data members
-     ***************************************************************************/
-  private:
-    static camera3_device_ops_t   sDeviceOps;
-    const camera3_callback_ops_t *mCallbackOps;
+  /****************************************************************************
+   * Data members
+   ***************************************************************************/
+ private:
+  static camera3_device_ops_t sDeviceOps;
+  const camera3_callback_ops_t *mCallbackOps;
 };
 
 }; /* namespace android */
 
-#endif  /* HW_EMULATOR_CAMERA_EMULATED_CAMERA3_H */
+#endif /* HW_EMULATOR_CAMERA_EMULATED_CAMERA3_H */
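
The private static entry points in this header follow the standard HAL
trampoline pattern: the framework calls a C-style function through
camera3_device_ops, and that function recovers the owning C++ instance and
forwards the call. A minimal sketch of the pattern, with toy types
(toy_camera_device, Impl, and the priv back-pointer are illustrative
stand-ins, not the actual camera3.h definitions):

    #include <cstdio>

    // Toy stand-in for a C HAL device struct (the real one is camera3_device).
    struct toy_camera_device {
      void* priv;  // HAL convention: points back at the owning C++ object.
    };

    class Impl {
     public:
      Impl() { mDev.priv = this; }
      toy_camera_device* device() { return &mDev; }

      // Static trampoline: recovers the instance from 'priv' and dispatches.
      static int do_flush(const toy_camera_device* d) {
        Impl* self = static_cast<Impl*>(d->priv);
        return self->flush();
      }

     private:
      int flush() {
        printf("flush on instance %p\n", static_cast<void*>(this));
        return 0;
      }
      toy_camera_device mDev{};
    };

    int main() {
      Impl camera;
      // The framework only ever sees the C struct and the static entry point.
      return Impl::do_flush(camera.device());
    }
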
diff --git a/guest/hals/camera/EmulatedCameraCommon.h b/guest/hals/camera/EmulatedCameraCommon.h
index c1d575c..0ec7501 100644
--- a/guest/hals/camera/EmulatedCameraCommon.h
+++ b/guest/hals/camera/EmulatedCameraCommon.h
@@ -21,8 +21,8 @@
  * Contains common declarations that are used across the camera emulation.
  */
 
-#include <linux/videodev2.h>
 #include <hardware/camera.h>
+#include <linux/videodev2.h>
 
 /* A helper class that tracks a routine execution.
  * Basically, it dumps an entry message in its constructor, and an exit message
@@ -30,21 +30,18 @@
  * of this class at the beginning of the tracked routines / methods.
  */
 class HWERoutineTracker {
-public:
-    /* Constructor that prints an "entry" trace message. */
-    explicit HWERoutineTracker(const char* name)
-            : mName(name) {
-        ALOGV("Entering %s", mName);
-    }
+ public:
+  /* Constructor that prints an "entry" trace message. */
+  explicit HWERoutineTracker(const char* name) : mName(name) {
+    ALOGV("Entering %s", mName);
+  }
 
-    /* Destructor that prints a "leave" trace message. */
-    ~HWERoutineTracker() {
-        ALOGV("Leaving %s", mName);
-    }
+  /* Destructor that prints a "leave" trace message. */
+  ~HWERoutineTracker() { ALOGV("Leaving %s", mName); }
 
-private:
-    /* Stores the routine name. */
-    const char* mName;
+ private:
+  /* Stores the routine name. */
+  const char* mName;
 };
 
 /* Logs an execution of a routine / method. */
@@ -54,7 +51,7 @@
  * min / max macros
  */
 
-#define min(a,b)    (((a) < (b)) ? (a) : (b))
-#define max(a,b)    (((a) > (b)) ? (a) : (b))
+#define min(a, b) (((a) < (b)) ? (a) : (b))
+#define max(a, b) (((a) > (b)) ? (a) : (b))
 
-#endif  /* HW_EMULATOR_CAMERA_EMULATED_CAMERA_COMMON_H */
+#endif /* HW_EMULATOR_CAMERA_EMULATED_CAMERA_COMMON_H */
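
One caveat worth noting about these function-like min/max macros (a general
C/C++ hazard, not something this change introduces): each argument expression
is evaluated twice, so arguments with side effects misbehave. A small
self-contained illustration:

    #include <cstdio>

    #define min(a, b) (((a) < (b)) ? (a) : (b))

    int main() {
      int i = 0;
      // 'i++' is evaluated twice: once in the comparison and once in the
      // selected branch, so this prints i = 2, m = 1 rather than i = 1, m = 0.
      int m = min(i++, 10);
      printf("i = %d, m = %d\n", i, m);
      return 0;
    }
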
diff --git a/guest/hals/camera/EmulatedCameraDevice.cpp b/guest/hals/camera/EmulatedCameraDevice.cpp
index 36de83a..99f2782 100644
--- a/guest/hals/camera/EmulatedCameraDevice.cpp
+++ b/guest/hals/camera/EmulatedCameraDevice.cpp
@@ -15,8 +15,8 @@
  */
 
 /*
- * Contains implementation of an abstract class EmulatedCameraDevice that defines
- * functionality expected from an emulated physical camera device:
+ * Contains implementation of an abstract class EmulatedCameraDevice that
+ * defines functionality expected from an emulated physical camera device:
  *  - Obtaining and setting camera parameters
  *  - Capturing frames
  *  - Streaming video
@@ -25,11 +25,11 @@
 
 #define LOG_NDEBUG 0
 #define LOG_TAG "EmulatedCamera_Device"
+#include "EmulatedCameraDevice.h"
 #include <cutils/log.h>
 #include <sys/select.h>
 #include <cmath>
 #include "EmulatedCamera.h"
-#include "EmulatedCameraDevice.h"
 
 namespace android {
 
@@ -43,218 +43,206 @@
       mWhiteBalanceScale(NULL),
       mIsFocusing(false),
       mSupportedWhiteBalanceScale(),
-      mState(ECDS_CONSTRUCTED)
-{
-}
+      mState(ECDS_CONSTRUCTED) {}
 
-EmulatedCameraDevice::~EmulatedCameraDevice()
-{
-    ALOGV("EmulatedCameraDevice destructor");
-    if (mCurrentFrame != NULL) {
-        delete[] mCurrentFrame;
+EmulatedCameraDevice::~EmulatedCameraDevice() {
+  ALOGV("EmulatedCameraDevice destructor");
+  if (mCurrentFrame != NULL) {
+    delete[] mCurrentFrame;
+  }
+  for (size_t i = 0; i < mSupportedWhiteBalanceScale.size(); ++i) {
+    if (mSupportedWhiteBalanceScale.valueAt(i) != NULL) {
+      delete[] mSupportedWhiteBalanceScale.valueAt(i);
     }
-    for (size_t i = 0; i < mSupportedWhiteBalanceScale.size(); ++i) {
-        if (mSupportedWhiteBalanceScale.valueAt(i) != NULL) {
-            delete[] mSupportedWhiteBalanceScale.valueAt(i);
-        }
-    }
+  }
 }
 
 /****************************************************************************
  * Emulated camera device public API
  ***************************************************************************/
 
-status_t EmulatedCameraDevice::Initialize()
-{
-    if (isInitialized()) {
-        ALOGW("%s: Emulated camera device is already initialized: mState = %d",
-             __FUNCTION__, mState);
-        return NO_ERROR;
-    }
-
-    /* Instantiate worker thread object. */
-    mWorkerThread = new WorkerThread(this);
-    if (getWorkerThread() == NULL) {
-        ALOGE("%s: Unable to instantiate worker thread object", __FUNCTION__);
-        return ENOMEM;
-    }
-
-    mState = ECDS_INITIALIZED;
-
+status_t EmulatedCameraDevice::Initialize() {
+  if (isInitialized()) {
+    ALOGW("%s: Emulated camera device is already initialized: mState = %d",
+          __FUNCTION__, mState);
     return NO_ERROR;
+  }
+
+  /* Instantiate worker thread object. */
+  mWorkerThread = new WorkerThread(this);
+  if (getWorkerThread() == NULL) {
+    ALOGE("%s: Unable to instantiate worker thread object", __FUNCTION__);
+    return ENOMEM;
+  }
+
+  mState = ECDS_INITIALIZED;
+
+  return NO_ERROR;
 }
 
-status_t EmulatedCameraDevice::startDeliveringFrames(bool one_burst)
-{
-    ALOGV("%s", __FUNCTION__);
+status_t EmulatedCameraDevice::startDeliveringFrames(bool one_burst) {
+  ALOGV("%s", __FUNCTION__);
 
-    if (!isStarted()) {
-        ALOGE("%s: Device is not started", __FUNCTION__);
-        return EINVAL;
-    }
+  if (!isStarted()) {
+    ALOGE("%s: Device is not started", __FUNCTION__);
+    return EINVAL;
+  }
 
-    /* Frames will be delivered from the thread routine. */
-    const status_t res = startWorkerThread(one_burst);
-    ALOGE_IF(res != NO_ERROR, "%s: startWorkerThread failed", __FUNCTION__);
-    return res;
+  /* Frames will be delivered from the thread routine. */
+  const status_t res = startWorkerThread(one_burst);
+  ALOGE_IF(res != NO_ERROR, "%s: startWorkerThread failed", __FUNCTION__);
+  return res;
 }
 
-status_t EmulatedCameraDevice::stopDeliveringFrames()
-{
-    ALOGV("%s", __FUNCTION__);
+status_t EmulatedCameraDevice::stopDeliveringFrames() {
+  ALOGV("%s", __FUNCTION__);
 
-    if (!isStarted()) {
-        ALOGW("%s: Device is not started", __FUNCTION__);
-        return NO_ERROR;
-    }
+  if (!isStarted()) {
+    ALOGW("%s: Device is not started", __FUNCTION__);
+    return NO_ERROR;
+  }
 
-    const status_t res = stopWorkerThread();
-    ALOGE_IF(res != NO_ERROR, "%s: startWorkerThread failed", __FUNCTION__);
-    return res;
+  const status_t res = stopWorkerThread();
+  ALOGE_IF(res != NO_ERROR, "%s: startWorkerThread failed", __FUNCTION__);
+  return res;
 }
 
 void EmulatedCameraDevice::setExposureCompensation(const float ev) {
-    ALOGV("%s", __FUNCTION__);
+  ALOGV("%s", __FUNCTION__);
 
-    if (!isStarted()) {
-        ALOGW("%s: Fake camera device is not started.", __FUNCTION__);
-    }
+  if (!isStarted()) {
+    ALOGW("%s: Fake camera device is not started.", __FUNCTION__);
+  }
 
-    mExposureCompensation = std::pow(2.0f, ev / GAMMA_CORRECTION);
-    ALOGV("New exposure compensation is %f", mExposureCompensation);
+  mExposureCompensation = std::pow(2.0f, ev / GAMMA_CORRECTION);
+  ALOGV("New exposure compensation is %f", mExposureCompensation);
 }
 
 void EmulatedCameraDevice::initializeWhiteBalanceModes(const char* mode,
                                                        const float r_scale,
                                                        const float b_scale) {
-    ALOGV("%s with %s, %f, %f", __FUNCTION__, mode, r_scale, b_scale);
-    float* value = new float[3];
-    value[0] = r_scale; value[1] = 1.0f; value[2] = b_scale;
-    mSupportedWhiteBalanceScale.add(String8(mode), value);
+  ALOGV("%s with %s, %f, %f", __FUNCTION__, mode, r_scale, b_scale);
+  float* value = new float[3];
+  value[0] = r_scale;
+  value[1] = 1.0f;
+  value[2] = b_scale;
+  mSupportedWhiteBalanceScale.add(String8(mode), value);
 }
 
 void EmulatedCameraDevice::setWhiteBalanceMode(const char* mode) {
-    ALOGV("%s with white balance %s", __FUNCTION__, mode);
-    mWhiteBalanceScale =
-            mSupportedWhiteBalanceScale.valueFor(String8(mode));
+  ALOGV("%s with white balance %s", __FUNCTION__, mode);
+  mWhiteBalanceScale = mSupportedWhiteBalanceScale.valueFor(String8(mode));
 }
 
-void EmulatedCameraDevice::startAutoFocus() {
-    mIsFocusing = true;
-}
+void EmulatedCameraDevice::startAutoFocus() { mIsFocusing = true; }
 
 /* Computes the pixel value after adjusting the white balance to the current
  * one. The inputs are the y, u, v channels of the pixel, and the adjusted
  * values will be stored in place. The adjustment is done in RGB space.
  */
-void EmulatedCameraDevice::changeWhiteBalance(uint8_t& y,
-                                              uint8_t& u,
+void EmulatedCameraDevice::changeWhiteBalance(uint8_t& y, uint8_t& u,
                                               uint8_t& v) const {
-    float r_scale = mWhiteBalanceScale[0];
-    float b_scale = mWhiteBalanceScale[2];
-    int r = static_cast<float>(YUV2R(y, u, v)) / r_scale;
-    int g = YUV2G(y, u, v);
-    int b = static_cast<float>(YUV2B(y, u, v)) / b_scale;
+  float r_scale = mWhiteBalanceScale[0];
+  float b_scale = mWhiteBalanceScale[2];
+  int r = static_cast<float>(YUV2R(y, u, v)) / r_scale;
+  int g = YUV2G(y, u, v);
+  int b = static_cast<float>(YUV2B(y, u, v)) / b_scale;
 
-    y = RGB2Y(r, g, b);
-    u = RGB2U(r, g, b);
-    v = RGB2V(r, g, b);
+  y = RGB2Y(r, g, b);
+  u = RGB2U(r, g, b);
+  v = RGB2V(r, g, b);
 }
 
 void EmulatedCameraDevice::simulateAutoFocus() {
-    if (mIsFocusing) {
-        ALOGV("%s: Simulating auto-focus", __FUNCTION__);
-        mCameraHAL->onCameraFocusAcquired();
-        mIsFocusing = false;
-    }
+  if (mIsFocusing) {
+    ALOGV("%s: Simulating auto-focus", __FUNCTION__);
+    mCameraHAL->onCameraFocusAcquired();
+    mIsFocusing = false;
+  }
 }
 
-status_t EmulatedCameraDevice::getCurrentPreviewFrame(void* buffer)
-{
-    if (!isStarted()) {
-        ALOGE("%s: Device is not started", __FUNCTION__);
-        return EINVAL;
-    }
-    if (mCurrentFrame == NULL || buffer == NULL) {
-        ALOGE("%s: No framebuffer", __FUNCTION__);
-        return EINVAL;
-    }
+status_t EmulatedCameraDevice::getCurrentPreviewFrame(void* buffer) {
+  if (!isStarted()) {
+    ALOGE("%s: Device is not started", __FUNCTION__);
+    return EINVAL;
+  }
+  if (mCurrentFrame == NULL || buffer == NULL) {
+    ALOGE("%s: No framebuffer", __FUNCTION__);
+    return EINVAL;
+  }
 
-    /* In emulation the framebuffer is never RGB. */
-    switch (mPixelFormat) {
-        case V4L2_PIX_FMT_YVU420:
-            YV12ToRGB32(mCurrentFrame, buffer, mFrameWidth, mFrameHeight);
-            return NO_ERROR;
-        case V4L2_PIX_FMT_YUV420:
-            YU12ToRGB32(mCurrentFrame, buffer, mFrameWidth, mFrameHeight);
-            return NO_ERROR;
-        case V4L2_PIX_FMT_NV21:
-            NV21ToRGB32(mCurrentFrame, buffer, mFrameWidth, mFrameHeight);
-            return NO_ERROR;
-        case V4L2_PIX_FMT_NV12:
-            NV12ToRGB32(mCurrentFrame, buffer, mFrameWidth, mFrameHeight);
-            return NO_ERROR;
+  /* In emulation the framebuffer is never RGB. */
+  switch (mPixelFormat) {
+    case V4L2_PIX_FMT_YVU420:
+      YV12ToRGB32(mCurrentFrame, buffer, mFrameWidth, mFrameHeight);
+      return NO_ERROR;
+    case V4L2_PIX_FMT_YUV420:
+      YU12ToRGB32(mCurrentFrame, buffer, mFrameWidth, mFrameHeight);
+      return NO_ERROR;
+    case V4L2_PIX_FMT_NV21:
+      NV21ToRGB32(mCurrentFrame, buffer, mFrameWidth, mFrameHeight);
+      return NO_ERROR;
+    case V4L2_PIX_FMT_NV12:
+      NV12ToRGB32(mCurrentFrame, buffer, mFrameWidth, mFrameHeight);
+      return NO_ERROR;
 
-        default:
-            ALOGE("%s: Unknown pixel format %.4s",
-                 __FUNCTION__, reinterpret_cast<const char*>(&mPixelFormat));
-            return EINVAL;
-    }
+    default:
+      ALOGE("%s: Unknown pixel format %.4s", __FUNCTION__,
+            reinterpret_cast<const char*>(&mPixelFormat));
+      return EINVAL;
+  }
 }
 
 /****************************************************************************
  * Emulated camera device private API
  ***************************************************************************/
 
-status_t EmulatedCameraDevice::commonStartDevice(int width,
-                                                 int height,
-                                                 uint32_t pix_fmt,
-                                                 int fps)
-{
-    /* Validate pixel format, and calculate framebuffer size at the same time. */
-    switch (pix_fmt) {
-        case V4L2_PIX_FMT_YVU420:
-        case V4L2_PIX_FMT_YUV420:
-        case V4L2_PIX_FMT_NV21:
-        case V4L2_PIX_FMT_NV12:
-            mFrameBufferSize = (width * height * 12) / 8;
-            break;
+status_t EmulatedCameraDevice::commonStartDevice(int width, int height,
+                                                 uint32_t pix_fmt, int fps) {
+  /* Validate pixel format, and calculate framebuffer size at the same time. */
+  switch (pix_fmt) {
+    case V4L2_PIX_FMT_YVU420:
+    case V4L2_PIX_FMT_YUV420:
+    case V4L2_PIX_FMT_NV21:
+    case V4L2_PIX_FMT_NV12:
+      mFrameBufferSize = (width * height * 12) / 8;
+      break;
 
-        default:
-            ALOGE("%s: Unknown pixel format %.4s",
-                 __FUNCTION__, reinterpret_cast<const char*>(&pix_fmt));
-            return EINVAL;
-    }
+    default:
+      ALOGE("%s: Unknown pixel format %.4s", __FUNCTION__,
+            reinterpret_cast<const char*>(&pix_fmt));
+      return EINVAL;
+  }
 
-    /* Cache framebuffer info. */
-    mFrameWidth = width;
-    mFrameHeight = height;
-    mPixelFormat = pix_fmt;
-    mTotalPixels = width * height;
-    mTargetFps = fps;
+  /* Cache framebuffer info. */
+  mFrameWidth = width;
+  mFrameHeight = height;
+  mPixelFormat = pix_fmt;
+  mTotalPixels = width * height;
+  mTargetFps = fps;
 
-    /* Allocate framebuffer. */
-    mCurrentFrame = new uint8_t[mFrameBufferSize];
-    if (mCurrentFrame == NULL) {
-        ALOGE("%s: Unable to allocate framebuffer", __FUNCTION__);
-        return ENOMEM;
-    }
-    ALOGV("%s: Allocated %p %zu bytes for %d pixels in %.4s[%dx%d] frame",
-         __FUNCTION__, mCurrentFrame, mFrameBufferSize, mTotalPixels,
-         reinterpret_cast<const char*>(&mPixelFormat), mFrameWidth, mFrameHeight);
-    return NO_ERROR;
+  /* Allocate framebuffer. */
+  mCurrentFrame = new uint8_t[mFrameBufferSize];
+  if (mCurrentFrame == NULL) {
+    ALOGE("%s: Unable to allocate framebuffer", __FUNCTION__);
+    return ENOMEM;
+  }
+  ALOGV("%s: Allocated %p %zu bytes for %d pixels in %.4s[%dx%d] frame",
+        __FUNCTION__, mCurrentFrame, mFrameBufferSize, mTotalPixels,
+        reinterpret_cast<const char*>(&mPixelFormat), mFrameWidth,
+        mFrameHeight);
+  return NO_ERROR;
 }
 
-void EmulatedCameraDevice::commonStopDevice()
-{
-    mFrameWidth = mFrameHeight = mTotalPixels = 0;
-    mPixelFormat = 0;
-    mTargetFps = 0;
+void EmulatedCameraDevice::commonStopDevice() {
+  mFrameWidth = mFrameHeight = mTotalPixels = 0;
+  mPixelFormat = 0;
+  mTargetFps = 0;
 
-    if (mCurrentFrame != NULL) {
-        delete[] mCurrentFrame;
-        mCurrentFrame = NULL;
-    }
+  if (mCurrentFrame != NULL) {
+    delete[] mCurrentFrame;
+    mCurrentFrame = NULL;
+  }
 }
 
 status_t EmulatedCameraDevice::getImageMetadata(struct ImageMetadata* meta) {
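
A note on the buffer-size arithmetic in commonStartDevice above: all four
accepted V4L2 formats are YUV 4:2:0 layouts, i.e. a full-resolution luma plane
plus two chroma planes subsampled 2x2, which is where the 12 bits per pixel in
(width * height * 12) / 8 comes from. A quick sanity check of that arithmetic
(standalone C++, independent of the HAL types; assumes even frame dimensions):

    #include <cassert>
    #include <cstddef>

    // YUV 4:2:0: one byte of Y per pixel, plus U and V planes that each carry
    // one byte per 2x2 block of pixels -> 1 + 1/4 + 1/4 = 1.5 bytes per pixel.
    static size_t Yuv420BufferSize(int width, int height) {
      size_t luma = static_cast<size_t>(width) * height;
      size_t chroma = 2u * (width / 2) * (height / 2);  // U plane + V plane
      return luma + chroma;
    }

    int main() {
      // Matches the (width * height * 12) / 8 expression for even dimensions.
      assert(Yuv420BufferSize(640, 480) == (640 * 480 * 12) / 8);
      assert(Yuv420BufferSize(1280, 720) == (1280 * 720 * 12) / 8);
      return 0;
    }
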
@@ -267,176 +255,169 @@
  * Worker thread management.
  ***************************************************************************/
 
-status_t EmulatedCameraDevice::startWorkerThread(bool one_burst)
-{
-    ALOGV("%s", __FUNCTION__);
+status_t EmulatedCameraDevice::startWorkerThread(bool one_burst) {
+  ALOGV("%s", __FUNCTION__);
 
-    if (!isInitialized()) {
-        ALOGE("%s: Emulated camera device is not initialized", __FUNCTION__);
-        return EINVAL;
-    }
+  if (!isInitialized()) {
+    ALOGE("%s: Emulated camera device is not initialized", __FUNCTION__);
+    return EINVAL;
+  }
 
-    const status_t res = getWorkerThread()->startThread(one_burst);
-    ALOGE_IF(res != NO_ERROR, "%s: Unable to start worker thread", __FUNCTION__);
-    return res;
+  const status_t res = getWorkerThread()->startThread(one_burst);
+  ALOGE_IF(res != NO_ERROR, "%s: Unable to start worker thread", __FUNCTION__);
+  return res;
 }
 
-status_t EmulatedCameraDevice::stopWorkerThread()
-{
-    ALOGV("%s", __FUNCTION__);
+status_t EmulatedCameraDevice::stopWorkerThread() {
+  ALOGV("%s", __FUNCTION__);
 
-    if (!isInitialized()) {
-        ALOGE("%s: Emulated camera device is not initialized", __FUNCTION__);
-        return EINVAL;
-    }
+  if (!isInitialized()) {
+    ALOGE("%s: Emulated camera device is not initialized", __FUNCTION__);
+    return EINVAL;
+  }
 
-    const status_t res = getWorkerThread()->stopThread();
-    ALOGE_IF(res != NO_ERROR, "%s: Unable to stop worker thread", __FUNCTION__);
-    return res;
+  const status_t res = getWorkerThread()->stopThread();
+  ALOGE_IF(res != NO_ERROR, "%s: Unable to stop worker thread", __FUNCTION__);
+  return res;
 }
 
-bool EmulatedCameraDevice::inWorkerThread()
-{
-    /* This will end the thread loop, and will terminate the thread. Derived
-     * classes must override this method. */
-    return false;
+bool EmulatedCameraDevice::inWorkerThread() {
+  /* This will end the thread loop, and will terminate the thread. Derived
+   * classes must override this method. */
+  return false;
 }
 
 /****************************************************************************
  * Worker thread implementation.
  ***************************************************************************/
 
-status_t EmulatedCameraDevice::WorkerThread::readyToRun()
-{
-    ALOGV("Starting emulated camera device worker thread...");
+status_t EmulatedCameraDevice::WorkerThread::readyToRun() {
+  ALOGV("Starting emulated camera device worker thread...");
 
-    ALOGW_IF(mThreadControl >= 0 || mControlFD >= 0,
-            "%s: Thread control FDs are opened", __FUNCTION__);
-    /* Create a pair of FDs that would be used to control the thread. */
-    int thread_fds[2];
-    status_t ret;
-    Mutex::Autolock lock(mCameraDevice->mObjectLock);
-    if (pipe(thread_fds) == 0) {
-        mThreadControl = thread_fds[1];
-        mControlFD = thread_fds[0];
-        ALOGV("Emulated device's worker thread has been started.");
-        ret = NO_ERROR;
-    } else {
-        ALOGE("%s: Unable to create thread control FDs: %d -> %s",
-             __FUNCTION__, errno, strerror(errno));
-        ret = errno;
-    }
+  ALOGW_IF(mThreadControl >= 0 || mControlFD >= 0,
+           "%s: Thread control FDs are opened", __FUNCTION__);
+  /* Create a pair of FDs that would be used to control the thread. */
+  int thread_fds[2];
+  status_t ret;
+  Mutex::Autolock lock(mCameraDevice->mObjectLock);
+  if (pipe(thread_fds) == 0) {
+    mThreadControl = thread_fds[1];
+    mControlFD = thread_fds[0];
+    ALOGV("Emulated device's worker thread has been started.");
+    ret = NO_ERROR;
+  } else {
+    ALOGE("%s: Unable to create thread control FDs: %d -> %s", __FUNCTION__,
+          errno, strerror(errno));
+    ret = errno;
+  }
 
-    mSetup.signal();
-    return ret;
+  mSetup.signal();
+  return ret;
 }
 
-status_t EmulatedCameraDevice::WorkerThread::stopThread()
-{
-    ALOGV("Stopping emulated camera device's worker thread...");
+status_t EmulatedCameraDevice::WorkerThread::stopThread() {
+  ALOGV("Stopping emulated camera device's worker thread...");
 
-    status_t res = EINVAL;
+  status_t res = EINVAL;
 
-    // Limit the scope of the Autolock
-    {
-      // If thread is running and readyToRun() has not finished running,
-      //    then wait until it is done.
-      Mutex::Autolock lock(mCameraDevice->mObjectLock);
+  // Limit the scope of the Autolock
+  {
+    // If thread is running and readyToRun() has not finished running,
+    //    then wait until it is done.
+    Mutex::Autolock lock(mCameraDevice->mObjectLock);
 #if VSOC_PLATFORM_SDK_AFTER(J_MR2)
-      if (isRunning() && (mThreadControl < 0 || mControlFD < 0)) {
+    if (isRunning() && (mThreadControl < 0 || mControlFD < 0)) {
 #else
-      if (getTid() != -1 && (mThreadControl < 0 || mControlFD < 0)) {
+    if (getTid() != -1 && (mThreadControl < 0 || mControlFD < 0)) {
 #endif
-          mSetup.wait(mCameraDevice->mObjectLock);
-      }
+      mSetup.wait(mCameraDevice->mObjectLock);
     }
+  }
 
-    if (mThreadControl >= 0) {
-        /* Send "stop" message to the thread loop. */
-        const ControlMessage msg = THREAD_STOP;
-        const int wres =
-            TEMP_FAILURE_RETRY(write(mThreadControl, &msg, sizeof(msg)));
-        if (wres == sizeof(msg)) {
-            /* Stop the thread, and wait till it's terminated. */
-            res = requestExitAndWait();
-            if (res == NO_ERROR) {
-                /* Close control FDs. */
-                if (mThreadControl >= 0) {
-                    close(mThreadControl);
-                    mThreadControl = -1;
-                }
-                if (mControlFD >= 0) {
-                    close(mControlFD);
-                    mControlFD = -1;
-                }
-                ALOGV("Emulated camera device's worker thread has been stopped.");
-            } else {
-                ALOGE("%s: requestExitAndWait failed: %d -> %s",
-                     __FUNCTION__, res, strerror(-res));
-            }
-        } else {
-            ALOGE("%s: Unable to send THREAD_STOP message: %d -> %s",
-                 __FUNCTION__, errno, strerror(errno));
-            res = errno ? errno : EINVAL;
+  if (mThreadControl >= 0) {
+    /* Send "stop" message to the thread loop. */
+    const ControlMessage msg = THREAD_STOP;
+    const int wres =
+        TEMP_FAILURE_RETRY(write(mThreadControl, &msg, sizeof(msg)));
+    if (wres == sizeof(msg)) {
+      /* Stop the thread, and wait till it's terminated. */
+      res = requestExitAndWait();
+      if (res == NO_ERROR) {
+        /* Close control FDs. */
+        if (mThreadControl >= 0) {
+          close(mThreadControl);
+          mThreadControl = -1;
         }
+        if (mControlFD >= 0) {
+          close(mControlFD);
+          mControlFD = -1;
+        }
+        ALOGV("Emulated camera device's worker thread has been stopped.");
+      } else {
+        ALOGE("%s: requestExitAndWait failed: %d -> %s", __FUNCTION__, res,
+              strerror(-res));
+      }
     } else {
-        ALOGE("%s: Thread control FDs are not opened", __FUNCTION__);
+      ALOGE("%s: Unable to send THREAD_STOP message: %d -> %s", __FUNCTION__,
+            errno, strerror(errno));
+      res = errno ? errno : EINVAL;
     }
+  } else {
+    ALOGE("%s: Thread control FDs are not opened", __FUNCTION__);
+  }
 
-    return res;
+  return res;
 }
 
 EmulatedCameraDevice::WorkerThread::SelectRes
-EmulatedCameraDevice::WorkerThread::Select(int fd, int timeout)
-{
-    fd_set fds[1];
-    struct timeval tv, *tvp = NULL;
+EmulatedCameraDevice::WorkerThread::Select(int fd, int timeout) {
+  fd_set fds[1];
+  struct timeval tv, *tvp = NULL;
 
-    mCameraDevice->simulateAutoFocus();
+  mCameraDevice->simulateAutoFocus();
 
-    const int fd_num = (fd >= 0) ? max(fd, mControlFD) + 1 :
-                                   mControlFD + 1;
-    FD_ZERO(fds);
-    FD_SET(mControlFD, fds);
-    if (fd >= 0) {
-        FD_SET(fd, fds);
+  const int fd_num = (fd >= 0) ? max(fd, mControlFD) + 1 : mControlFD + 1;
+  FD_ZERO(fds);
+  FD_SET(mControlFD, fds);
+  if (fd >= 0) {
+    FD_SET(fd, fds);
+  }
+  if (timeout) {
+    tv.tv_sec = timeout / 1000000;
+    tv.tv_usec = timeout % 1000000;
+    tvp = &tv;
+  }
+  int res = TEMP_FAILURE_RETRY(select(fd_num, fds, NULL, NULL, tvp));
+  if (res < 0) {
+    ALOGE("%s: select returned %d and failed: %d -> %s", __FUNCTION__, res,
+          errno, strerror(errno));
+    return ERROR;
+  } else if (res == 0) {
+    /* Timeout. */
+    return TIMEOUT;
+  } else if (FD_ISSET(mControlFD, fds)) {
+    /* A control event. Let's read the message. */
+    ControlMessage msg;
+    res = TEMP_FAILURE_RETRY(read(mControlFD, &msg, sizeof(msg)));
+    if (res != sizeof(msg)) {
+      ALOGE("%s: Unexpected message size %d, or an error %d -> %s",
+            __FUNCTION__, res, errno, strerror(errno));
+      return ERROR;
     }
-    if (timeout) {
-        tv.tv_sec = timeout / 1000000;
-        tv.tv_usec = timeout % 1000000;
-        tvp = &tv;
-    }
-    int res = TEMP_FAILURE_RETRY(select(fd_num, fds, NULL, NULL, tvp));
-    if (res < 0) {
-        ALOGE("%s: select returned %d and failed: %d -> %s",
-             __FUNCTION__, res, errno, strerror(errno));
-        return ERROR;
-    } else if (res == 0) {
-        /* Timeout. */
-        return TIMEOUT;
-    } else if (FD_ISSET(mControlFD, fds)) {
-        /* A control event. Lets read the message. */
-        ControlMessage msg;
-        res = TEMP_FAILURE_RETRY(read(mControlFD, &msg, sizeof(msg)));
-        if (res != sizeof(msg)) {
-            ALOGE("%s: Unexpected message size %d, or an error %d -> %s",
-                 __FUNCTION__, res, errno, strerror(errno));
-            return ERROR;
-        }
-        /* THREAD_STOP is the only message expected here. */
-        if (msg == THREAD_STOP) {
-            ALOGV("%s: THREAD_STOP message is received", __FUNCTION__);
-            return EXIT_THREAD;
-        } else {
-            ALOGE("Unknown worker thread message %d", msg);
-            return ERROR;
-        }
+    /* THREAD_STOP is the only message expected here. */
+    if (msg == THREAD_STOP) {
+      ALOGV("%s: THREAD_STOP message is received", __FUNCTION__);
+      return EXIT_THREAD;
     } else {
-        /* Must be an FD. */
-        ALOGW_IF(fd < 0 || !FD_ISSET(fd, fds), "%s: Undefined 'select' result",
-                __FUNCTION__);
-        return READY;
+      ALOGE("Unknown worker thread message %d", msg);
+      return ERROR;
     }
+  } else {
+    /* Must be an FD. */
+    ALOGW_IF(fd < 0 || !FD_ISSET(fd, fds), "%s: Undefined 'select' result",
+             __FUNCTION__);
+    return READY;
+  }
 }
 
-};  /* namespace android */
+}; /* namespace android */
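
The stop protocol implemented by WorkerThread above is the classic self-pipe
pattern: the control pipe's read end sits in the same select() set as the data
fd, and writing a THREAD_STOP message to the write end wakes the loop so it
can exit cleanly. A stripped-down sketch of just that mechanism (POSIX only,
hypothetical names, single-threaded here so the example runs on its own):

    #include <sys/select.h>
    #include <unistd.h>
    #include <cstdio>

    enum ControlMessage : unsigned char { THREAD_STOP = 1 };

    int main() {
      int fds[2];
      if (pipe(fds) != 0) return 1;
      const int control_read = fds[0], control_write = fds[1];

      // Normally another thread writes this to request shutdown; done inline
      // here so the sketch is self-contained.
      ControlMessage stop = THREAD_STOP;
      write(control_write, &stop, sizeof(stop));

      // Worker loop body: select() wakes on either camera data or control.
      fd_set readfds;
      FD_ZERO(&readfds);
      FD_SET(control_read, &readfds);
      if (select(control_read + 1, &readfds, NULL, NULL, NULL) > 0 &&
          FD_ISSET(control_read, &readfds)) {
        ControlMessage msg;
        if (read(control_read, &msg, sizeof(msg)) ==
                static_cast<ssize_t>(sizeof(msg)) &&
            msg == THREAD_STOP) {
          printf("THREAD_STOP received; exiting thread loop\n");
        }
      }
      close(control_read);
      close(control_write);
      return 0;
    }
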
diff --git a/guest/hals/camera/EmulatedCameraDevice.h b/guest/hals/camera/EmulatedCameraDevice.h
index 7d0a749..ccd786f 100644
--- a/guest/hals/camera/EmulatedCameraDevice.h
+++ b/guest/hals/camera/EmulatedCameraDevice.h
@@ -26,11 +26,11 @@
  *  - etc.
  */
 
-#include <utils/threads.h>
 #include <utils/KeyedVector.h>
 #include <utils/String8.h>
-#include "EmulatedCameraCommon.h"
+#include <utils/threads.h>
 #include "Converters.h"
+#include "EmulatedCameraCommon.h"
 #include "ImageMetadata.h"
 
 namespace android {
@@ -45,529 +45,506 @@
  *  - etc.
  */
 class EmulatedCameraDevice {
-public:
-    /* Constructs EmulatedCameraDevice instance.
-     * Param:
-     *  camera_hal - Emulated camera that implements the camera HAL API, and
-     *      manages (contains) this object.
-     */
-    explicit EmulatedCameraDevice(EmulatedCamera* camera_hal);
+ public:
+  /* Constructs EmulatedCameraDevice instance.
+   * Param:
+   *  camera_hal - Emulated camera that implements the camera HAL API, and
+   *      manages (contains) this object.
+   */
+  explicit EmulatedCameraDevice(EmulatedCamera* camera_hal);
 
-    /* Destructs EmulatedCameraDevice instance. */
-    virtual ~EmulatedCameraDevice();
+  /* Destructs EmulatedCameraDevice instance. */
+  virtual ~EmulatedCameraDevice();
 
-    /***************************************************************************
-     * Emulated camera device abstract interface
-     **************************************************************************/
+  /***************************************************************************
+   * Emulated camera device abstract interface
+   **************************************************************************/
 
-public:
-    /* Connects to the camera device.
-     * This method must be called on an initialized instance of this class.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status.
-     */
-    virtual status_t connectDevice() = 0;
+ public:
+  /* Connects to the camera device.
+   * This method must be called on an initialized instance of this class.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status.
+   */
+  virtual status_t connectDevice() = 0;
 
-    /* Disconnects from the camera device.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status. If this method is
-     *  called for already disconnected, or uninitialized instance of this class,
-     *  a successful status must be returned from this method. If this method is
-     *  called for an instance that is in the "started" state, this method must
-     *  return a failure.
-     */
-    virtual status_t disconnectDevice() = 0;
+  /* Disconnects from the camera device.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status. If this method is
+   *  called for already disconnected, or uninitialized instance of this class,
+   *  a successful status must be returned from this method. If this method is
+   *  called for an instance that is in the "started" state, this method must
+   *  return a failure.
+   */
+  virtual status_t disconnectDevice() = 0;
 
-    /* Starts the camera device.
-     * This method tells the camera device to start capturing frames of the given
-     * dimensions for the given pixel format. Note that this method doesn't start
-     * the delivery of the captured frames to the emulated camera. Call
-     * startDeliveringFrames method to start delivering frames. This method must
-     * be called on a connected instance of this class. If it is called on a
-     * disconnected instance, this method must return a failure.
-     * Param:
-     *  width, height - Frame dimensions to use when capturing video frames.
-     *  pix_fmt - Pixel format to use when capturing video frames.
-     *  fps - Target rate of frames per second.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status.
-     */
-    virtual status_t startDevice(int width,
-                                 int height,
-                                 uint32_t pix_fmt,
-                                 int fps) = 0;
+  /* Starts the camera device.
+   * This method tells the camera device to start capturing frames of the given
+   * dimensions for the given pixel format. Note that this method doesn't start
+   * the delivery of the captured frames to the emulated camera. Call
+   * startDeliveringFrames method to start delivering frames. This method must
+   * be called on a connected instance of this class. If it is called on a
+   * disconnected instance, this method must return a failure.
+   * Param:
+   *  width, height - Frame dimensions to use when capturing video frames.
+   *  pix_fmt - Pixel format to use when capturing video frames.
+   *  fps - Target rate of frames per second.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status.
+   */
+  virtual status_t startDevice(int width, int height, uint32_t pix_fmt,
+                               int fps) = 0;
 
-    /* Stops the camera device.
-     * This method tells the camera device to stop capturing frames. Note that
-     * this method doesn't stop delivering frames to the emulated camera. Always
-     * call stopDeliveringFrames prior to calling this method.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status. If this method is
-     *  called for an object that is not capturing frames, or is disconnected,
-     *  or is uninitialized, a successful status must be returned from this
-     *  method.
-     */
-    virtual status_t stopDevice() = 0;
+  /* Stops the camera device.
+   * This method tells the camera device to stop capturing frames. Note that
+   * this method doesn't stop delivering frames to the emulated camera. Always
+   * call stopDeliveringFrames prior to calling this method.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status. If this method is
+   *  called for an object that is not capturing frames, or is disconnected,
+   *  or is uninitialized, a successful status must be returned from this
+   *  method.
+   */
+  virtual status_t stopDevice() = 0;
 
-    /***************************************************************************
-     * Emulated camera device public API
-     **************************************************************************/
+  /***************************************************************************
+   * Emulated camera device public API
+   **************************************************************************/
 
-public:
-    /* Initializes EmulatedCameraDevice instance.
-     * Derived classes should override this method in order to cache static
-     * properties of the physical device (list of supported pixel formats, frame
-     * sizes, etc.) If this method is called on an already initialized instance,
-     * it must return a successful status.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status.
-     */
-    virtual status_t Initialize();
+ public:
+  /* Initializes EmulatedCameraDevice instance.
+   * Derived classes should override this method in order to cache static
+   * properties of the physical device (list of supported pixel formats, frame
+   * sizes, etc.) If this method is called on an already initialized instance,
+   * it must return a successful status.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status.
+   */
+  virtual status_t Initialize();
 
-    /* Initializes the white balance modes parameters.
-     * The parameters are passed by each individual derived camera API to
-     * represent that different camera manufacturers may have different
-     * preferences on the white balance parameters. Green channel in the RGB
-     * color space is fixed to keep the luminance to be reasonably constant.
-     *
-     * Param:
-     * mode the text describing the current white balance mode
-     * r_scale the scale factor for the R channel in RGB space
-     * b_scale the scale factor for the B channel in RGB space.
-     */
-    void initializeWhiteBalanceModes(const char* mode,
-                                     const float r_scale,
-                                     const float b_scale);
+  /* Initializes the white balance modes parameters.
+   * The parameters are passed by each individual derived camera API to
+   * represent that different camera manufacturers may have different
+   * preferences on the white balance parameters. Green channel in the RGB
+   * color space is fixed to keep the luminance reasonably constant.
+   *
+   * Param:
+   * mode the text describing the current white balance mode
+   * r_scale the scale factor for the R channel in RGB space
+   * b_scale the scale factor for the B channel in RGB space.
+   */
+  void initializeWhiteBalanceModes(const char* mode, const float r_scale,
+                                   const float b_scale);
 
-    /* Starts delivering frames captured from the camera device.
-     * This method will start the worker thread that would be pulling frames from
-     * the camera device, and will deliver the pulled frames back to the emulated
-     * camera via onNextFrameAvailable callback. This method must be called on a
-     * connected instance of this class with a started camera device. If it is
-     * called on a disconnected instance, or camera device has not been started,
-     * this method must return a failure.
-     * Param:
-     *  one_burst - Controls how many frames should be delivered. If this
-     *      parameter is 'true', only one captured frame will be delivered to the
-     *      emulated camera. If this parameter is 'false', frames will keep
-     *      coming until stopDeliveringFrames method is called. Typically, this
-     *      parameter is set to 'true' only in order to obtain a single frame
-     *      that will be used as a "picture" in takePicture method of the
-     *      emulated camera.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status.
-     */
-    virtual status_t startDeliveringFrames(bool one_burst);
+  /* Starts delivering frames captured from the camera device.
+   * This method will start the worker thread that would be pulling frames from
+   * the camera device, and will deliver the pulled frames back to the emulated
+   * camera via onNextFrameAvailable callback. This method must be called on a
+   * connected instance of this class with a started camera device. If it is
+   * called on a disconnected instance, or camera device has not been started,
+   * this method must return a failure.
+   * Param:
+   *  one_burst - Controls how many frames should be delivered. If this
+   *      parameter is 'true', only one captured frame will be delivered to the
+   *      emulated camera. If this parameter is 'false', frames will keep
+   *      coming until stopDeliveringFrames method is called. Typically, this
+   *      parameter is set to 'true' only in order to obtain a single frame
+   *      that will be used as a "picture" in takePicture method of the
+   *      emulated camera.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status.
+   */
+  virtual status_t startDeliveringFrames(bool one_burst);
 
-    /* Stops delivering frames captured from the camera device.
-     * This method will stop the worker thread started by startDeliveringFrames.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status.
-     */
-    virtual status_t stopDeliveringFrames();
+  /* Stops delivering frames captured from the camera device.
+   * This method will stop the worker thread started by startDeliveringFrames.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status.
+   */
+  virtual status_t stopDeliveringFrames();
 
-    /* Sets the exposure compensation for the camera device.
-     */
-    void setExposureCompensation(const float ev);
+  /* Sets the exposure compensation for the camera device.
+   */
+  void setExposureCompensation(const float ev);
 
-    /* Sets the white balance mode for the device.
-     */
-    void setWhiteBalanceMode(const char* mode);
+  /* Sets the white balance mode for the device.
+   */
+  void setWhiteBalanceMode(const char* mode);
 
-    /* Initiates focus operation.
-     */
-    virtual void startAutoFocus();
+  /* Initiates focus operation.
+   */
+  virtual void startAutoFocus();
 
-    /* Gets current framebuffer, converted into preview frame format.
-     * This method must be called on a connected instance of this class with a
-     * started camera device. If it is called on a disconnected instance, or
-     * camera device has not been started, this method must return a failure.
-     * Note that this method should be called only after at least one frame has
-     * been captured and delivered. Otherwise it will return garbage in the
-     * preview frame buffer. Typically, this method shuld be called from
-     * onNextFrameAvailable callback.
-     * Param:
-     *  buffer - Buffer, large enough to contain the entire preview frame.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status.
-     */
-    virtual status_t getCurrentPreviewFrame(void* buffer);
+  /* Gets current framebuffer, converted into preview frame format.
+   * This method must be called on a connected instance of this class with a
+   * started camera device. If it is called on a disconnected instance, or
+   * camera device has not been started, this method must return a failure.
+   * Note that this method should be called only after at least one frame has
+   * been captured and delivered. Otherwise it will return garbage in the
+   * preview frame buffer. Typically, this method should be called from
+   * onNextFrameAvailable callback.
+   * Param:
+   *  buffer - Buffer, large enough to contain the entire preview frame.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status.
+   */
+  virtual status_t getCurrentPreviewFrame(void* buffer);
 
-    /* Gets width of the frame obtained from the physical device.
-     * Return:
-     *  Width of the frame obtained from the physical device. Note that value
-     *  returned from this method is valid only in case if camera device has been
-     *  started.
-     */
-    inline int getFrameWidth() const
-    {
-        ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
-        return mFrameWidth;
-    }
+  /* Gets width of the frame obtained from the physical device.
+   * Return:
+   *  Width of the frame obtained from the physical device. Note that the
+   *  value returned from this method is valid only if the camera device has
+   *  been started.
+   */
+  inline int getFrameWidth() const {
+    ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
+    return mFrameWidth;
+  }
 
-    /* Gets height of the frame obtained from the physical device.
-     * Return:
-     *  Height of the frame obtained from the physical device. Note that value
-     *  returned from this method is valid only in case if camera device has been
-     *  started.
-     */
-    inline int getFrameHeight() const
-    {
-        ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
-        return mFrameHeight;
-    }
+  /* Gets height of the frame obtained from the physical device.
+   * Return:
+   *  Height of the frame obtained from the physical device. Note that the
+   *  value returned from this method is valid only if the camera device has
+   *  been started.
+   */
+  inline int getFrameHeight() const {
+    ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
+    return mFrameHeight;
+  }
 
-    /* Gets byte size of the current frame buffer.
-     * Return:
-     *  Byte size of the frame buffer. Note that value returned from this method
-     *  is valid only in case if camera device has been started.
-     */
-    inline size_t getFrameBufferSize() const
-    {
-        ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
-        return mFrameBufferSize;
-    }
+  /* Gets byte size of the current frame buffer.
+   * Return:
+   *  Byte size of the frame buffer. Note that the value returned from this
+   *  method is valid only if the camera device has been started.
+   */
+  inline size_t getFrameBufferSize() const {
+    ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
+    return mFrameBufferSize;
+  }
 
-    /* Gets number of pixels in the current frame buffer.
-     * Return:
-     *  Number of pixels in the frame buffer. Note that value returned from this
-     *  method is valid only in case if camera device has been started.
-     */
-    inline int getPixelNum() const
-    {
-        ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
-        return mTotalPixels;
-    }
+  /* Gets number of pixels in the current frame buffer.
+   * Return:
+   *  Number of pixels in the frame buffer. Note that the value returned from
+   *  this method is valid only if the camera device has been started.
+   */
+  inline int getPixelNum() const {
+    ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
+    return mTotalPixels;
+  }
 
-    /* Gets pixel format of the frame that camera device streams to this class.
-     * Throughout camera framework, there are three different forms of pixel
-     * format representation:
-     *  - Original format, as reported by the actual camera device. Values for
-     *    this format are declared in bionic/libc/kernel/common/linux/videodev2.h
-     *  - String representation as defined in CameraParameters::PIXEL_FORMAT_XXX
-     *    strings in frameworks/base/include/camera/CameraParameters.h
-     *  - HAL_PIXEL_FORMAT_XXX format, as defined in system/core/include/system/graphics.h
-     * Since emulated camera device gets its data from the actual device, it gets
-     * pixel format in the original form. And that's the pixel format
-     * representation that will be returned from this method. HAL components will
-     * need to translate value returned from this method to the appropriate form.
-     * This method must be called only on started instance of this class, since
-     * it's applicable only when camera device is ready to stream frames.
-     * Param:
-     *  pix_fmt - Upon success contains the original pixel format.
-     * Return:
-     *  Current framebuffer's pixel format. Note that value returned from this
-     *  method is valid only in case if camera device has been started.
-     */
-    inline uint32_t getOriginalPixelFormat() const
-    {
-        ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
-        return mPixelFormat;
-    }
+  /* Gets pixel format of the frame that camera device streams to this class.
+   * Throughout camera framework, there are three different forms of pixel
+   * format representation:
+   *  - Original format, as reported by the actual camera device. Values for
+   *    this format are declared in bionic/libc/kernel/common/linux/videodev2.h
+   *  - String representation as defined in CameraParameters::PIXEL_FORMAT_XXX
+   *    strings in frameworks/base/include/camera/CameraParameters.h
+   *  - HAL_PIXEL_FORMAT_XXX format, as defined in
+   *    system/core/include/system/graphics.h
+   * Since emulated camera device gets its data from the actual device, it
+   * gets pixel format in the original form. And that's the pixel format
+   * representation that will be returned from this method. HAL components
+   * will need to translate value returned from this method to the
+   * appropriate form. This method must be called only on started instance of
+   * this class, since it's applicable only when camera device is ready to
+   * stream frames.
+   * Param:
+   *  pix_fmt - Upon success contains the original pixel format.
+   * Return:
+   *  Current framebuffer's pixel format. Note that the value returned from
+   *  this method is valid only if the camera device has been started.
+   */
+  inline uint32_t getOriginalPixelFormat() const {
+    ALOGE_IF(!isStarted(), "%s: Device is not started", __FUNCTION__);
+    return mPixelFormat;
+  }
 
-    /* Gets image metadata (from HAL).
-     * Return:
-     *  Filled in ImageMetadata structure (in/out parameter).
-     */
-    status_t getImageMetadata(struct ::ImageMetadata* meta);
+  /* Gets image metadata (from HAL).
+   * Return:
+   *  Filled in ImageMetadata structure (in/out parameter).
+   */
+  status_t getImageMetadata(struct ::ImageMetadata* meta);
 
-    /*
-     * State checkers.
-     */
+  /*
+   * State checkers.
+   */
 
-    inline bool isInitialized() const {
-        /* Instance is initialized when the worker thread has been successfuly
-         * created (but not necessarily started). */
-        return mWorkerThread.get() != NULL && mState != ECDS_CONSTRUCTED;
-    }
-    inline bool isConnected() const {
-        /* Instance is connected when its status is either"connected", or
-         * "started". */
-        return mState == ECDS_CONNECTED || mState == ECDS_STARTED;
-    }
-    inline bool isStarted() const {
-        return mState == ECDS_STARTED;
-    }
+  inline bool isInitialized() const {
+    /* Instance is initialized when the worker thread has been successfully
+     * created (but not necessarily started). */
+    return mWorkerThread.get() != NULL && mState != ECDS_CONSTRUCTED;
+  }
+  inline bool isConnected() const {
+    /* Instance is connected when its status is either "connected", or
+     * "started". */
+    return mState == ECDS_CONNECTED || mState == ECDS_STARTED;
+  }
+  inline bool isStarted() const { return mState == ECDS_STARTED; }
 
+  /****************************************************************************
+   * Emulated camera device private API
+   ***************************************************************************/
+ protected:
+  /* Performs common validation and calculation of startDevice parameters.
+   * Param:
+   *  width, height, pix_fmt, fps - Parameters passed to startDevice method.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status.
+   */
+  virtual status_t commonStartDevice(int width, int height, uint32_t pix_fmt,
+                                     int fps);
+
+  /* Performs common cleanup on stopDevice.
+   * This method will undo what commonStartDevice had done.
+   */
+  virtual void commonStopDevice();
+
+  /** Computes a luminance value after taking the exposure compensation
+   * value into account.
+   *
+   * Param:
+   * inputY - The input luminance value.
+   * Return:
+   * The luminance value after adjusting the exposure compensation.
+   */
+  inline uint8_t changeExposure(const uint8_t& inputY) const {
+    return static_cast<uint8_t>(
+        clamp(static_cast<float>(inputY) * mExposureCompensation));
+  }
+
+  /** Simulates focusing and reports completion to the client.
+   */
+  void simulateAutoFocus();
+
+  /** Computes the pixel value in YUV space after adjusting to the current
+   * white balance mode.
+   */
+  void changeWhiteBalance(uint8_t& y, uint8_t& u, uint8_t& v) const;
+
+  /****************************************************************************
+   * Worker thread management.
+   * Typically when emulated camera device starts capturing frames from the
+   * actual device, it does that in a worker thread created in StartCapturing,
+   * and terminated in StopCapturing. Since this is such a typical scenario,
+   * it makes sense to encapsulate worker thread management in the base class
+   * for all emulated camera devices.
+   ***************************************************************************/
+
+ protected:
+  /* Starts the worker thread.
+   * Typically, worker thread is started from startDeliveringFrames method of
+   * this class.
+   * Param:
+   *  one_burst - Controls how many times thread loop should run. If this
+   *      parameter is 'true', thread routine will run only once. If this
+   *      parameter is 'false', thread routine will run until stopWorkerThread
+   *      method is called. See startDeliveringFrames for more info.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status.
+   */
+  virtual status_t startWorkerThread(bool one_burst);
+
+  /* Stops the worker thread.
+   * Note that this method will always wait for the worker thread to terminate.
+   * Typically, worker thread is stopped from stopDeliveringFrames method of
+   * this class.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status.
+   */
+  virtual status_t stopWorkerThread();
+
+  /* Implementation of the worker thread routine.
+   * In the default implementation of the worker thread routine we simply
+   * return 'false' forcing the thread loop to exit, and the thread to
+   * terminate. Derived classes should override this method to provide the
+   * actual frame delivery.
+   * Return:
+   *  true to continue the thread loop (this method will be called again), or
+   *  false to exit the thread loop and terminate the thread.
+   */
+  virtual bool inWorkerThread();
+
+  /* Encapsulates a worker thread used by the emulated camera device.
+   */
+  friend class WorkerThread;
+  class WorkerThread : public Thread {
     /****************************************************************************
-     * Emulated camera device private API
-     ***************************************************************************/
-protected:
-    /* Performs common validation and calculation of startDevice parameters.
-     * Param:
-     *  width, height, pix_fmt, fps - Parameters passed to startDevice method.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status.
-     */
-    virtual status_t commonStartDevice(int width,
-                                       int height,
-                                       uint32_t pix_fmt,
-                                       int fps);
-
-    /* Performs common cleanup on stopDevice.
-     * This method will undo what commonStartDevice had done.
-     */
-    virtual void commonStopDevice();
-
-    /** Computes a luminance value after taking the exposure compensation.
-     * value into account.
-     *
-     * Param:
-     * inputY - The input luminance value.
-     * Return:
-     * The luminance value after adjusting the exposure compensation.
-     */
-    inline uint8_t changeExposure(const uint8_t& inputY) const {
-        return static_cast<uint8_t>(clamp(static_cast<float>(inputY) *
-                                    mExposureCompensation));
-    }
-
-    /** Simulates focusing and reports completion to the client.
-     */
-    void simulateAutoFocus();
-
-    /** Computes the pixel value in YUV space after adjusting to the current
-     * white balance mode.
-     */
-    void changeWhiteBalance(uint8_t& y, uint8_t& u, uint8_t& v) const;
-
-    /****************************************************************************
-     * Worker thread management.
-     * Typicaly when emulated camera device starts capturing frames from the
-     * actual device, it does that in a worker thread created in StartCapturing,
-     * and terminated in StopCapturing. Since this is such a typical scenario,
-     * it makes sence to encapsulate worker thread management in the base class
-     * for all emulated camera devices.
+     * Public API
      ***************************************************************************/
 
-protected:
-    /* Starts the worker thread.
-     * Typically, worker thread is started from startDeliveringFrames method of
-     * this class.
+   public:
+    inline explicit WorkerThread(EmulatedCameraDevice* camera_dev)
+        : Thread(true),  // Callbacks may involve Java calls.
+          mCameraDevice(camera_dev),
+          mThreadControl(-1),
+          mControlFD(-1) {}
+
+    inline ~WorkerThread() {
+      ALOGW_IF(mThreadControl >= 0 || mControlFD >= 0,
+               "%s: Control FDs are opened in the destructor", __FUNCTION__);
+      if (mThreadControl >= 0) {
+        close(mThreadControl);
+      }
+      if (mControlFD >= 0) {
+        close(mControlFD);
+      }
+    }
+
+    /* Starts the thread.
      * Param:
-     *  one_burst - Controls how many times thread loop should run. If this
-     *      parameter is 'true', thread routine will run only once If this
-     *      parameter is 'false', thread routine will run until stopWorkerThread
-     *      method is called. See startDeliveringFrames for more info.
+     *  one_burst - Controls how many times thread loop should run. If
+     *      this parameter is 'true', thread routine will run only once.
+     *      If this parameter is 'false', thread routine will run until
+     *      stopThread method is called. See startWorkerThread for more
+     *      info.
      * Return:
      *  NO_ERROR on success, or an appropriate error status.
      */
-    virtual status_t startWorkerThread(bool one_burst);
+    inline status_t startThread(bool one_burst) {
+      mOneBurst = one_burst;
+      return run("Camera_startThread", ANDROID_PRIORITY_URGENT_DISPLAY, 0);
+    }
 
-    /* Stops the worker thread.
-     * Note that this method will always wait for the worker thread to terminate.
-     * Typically, worker thread is started from stopDeliveringFrames method of
-     * this class.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status.
+    /* Overridden base class method.
+     * It is overridden in order to provide one-time initialization just
+     * prior to starting the thread routine.
      */
-    virtual status_t stopWorkerThread();
+    status_t readyToRun();
 
-    /* Implementation of the worker thread routine.
-     * In the default implementation of the worker thread routine we simply
-     * return 'false' forcing the thread loop to exit, and the thread to
-     * terminate. Derived class should override that method to provide there the
-     * actual frame delivery.
-     * Return:
-     *  true To continue thread loop (this method will be called again), or false
-     *  to exit the thread loop and to terminate the thread.
-     */
-    virtual bool inWorkerThread();
+    /* Stops the thread. */
+    status_t stopThread();
 
-    /* Encapsulates a worker thread used by the emulated camera device.
-     */
-    friend class WorkerThread;
-    class WorkerThread : public Thread {
-
-        /****************************************************************************
-         * Public API
-         ***************************************************************************/
-
-        public:
-            inline explicit WorkerThread(EmulatedCameraDevice* camera_dev)
-                : Thread(true),   // Callbacks may involve Java calls.
-                  mCameraDevice(camera_dev),
-                  mThreadControl(-1),
-                  mControlFD(-1)
-            {
-            }
-
-            inline ~WorkerThread()
-            {
-                ALOGW_IF(mThreadControl >= 0 || mControlFD >= 0,
-                        "%s: Control FDs are opened in the destructor",
-                        __FUNCTION__);
-                if (mThreadControl >= 0) {
-                    close(mThreadControl);
-                }
-                if (mControlFD >= 0) {
-                    close(mControlFD);
-                }
-            }
-
-            /* Starts the thread
-             * Param:
-             *  one_burst - Controls how many times thread loop should run. If
-             *      this parameter is 'true', thread routine will run only once
-             *      If this parameter is 'false', thread routine will run until
-             *      stopThread method is called. See startWorkerThread for more
-             *      info.
-             * Return:
-             *  NO_ERROR on success, or an appropriate error status.
-             */
-            inline status_t startThread(bool one_burst)
-            {
-                mOneBurst = one_burst;
-                return run("Camera_startThread", ANDROID_PRIORITY_URGENT_DISPLAY, 0);
-            }
-
-            /* Overriden base class method.
-             * It is overriden in order to provide one-time initialization just
-             * prior to starting the thread routine.
-             */
-            status_t readyToRun();
-
-            /* Stops the thread. */
-            status_t stopThread();
-
-            /* Values returned from the Select method of this class. */
-            enum SelectRes {
-                /* A timeout has occurred. */
-                TIMEOUT,
-                /* Data are available for read on the provided FD. */
-                READY,
-                /* Thread exit request has been received. */
-                EXIT_THREAD,
-                /* An error has occurred. */
-                ERROR
-            };
-
-            /* Select on an FD event, keeping in mind thread exit message.
-             * Param:
-             *  fd - File descriptor on which to wait for an event. This
-             *      parameter may be negative. If it is negative this method will
-             *      only wait on a control message to the thread.
-             *  timeout - Timeout in microseconds. 0 indicates no timeout (wait
-             *      forever).
-             * Return:
-             *  See SelectRes enum comments.
-             */
-            SelectRes Select(int fd, int timeout);
-
-        /****************************************************************************
-         * Private API
-         ***************************************************************************/
-
-        private:
-            /* Implements abstract method of the base Thread class. */
-            bool threadLoop()
-            {
-                /* Simply dispatch the call to the containing camera device. */
-                if (mCameraDevice->inWorkerThread()) {
-                    /* Respect "one burst" parameter (see startThread). */
-                    return !mOneBurst;
-                } else {
-                    return false;
-                }
-            }
-
-            /* Containing camera device object. */
-            EmulatedCameraDevice*   mCameraDevice;
-
-            /* FD that is used to send control messages into the thread. */
-            int                     mThreadControl;
-
-            /* FD that thread uses to receive control messages. */
-            int                     mControlFD;
-
-            /* Controls number of times the thread loop runs.
-             * See startThread for more information. */
-            bool                    mOneBurst;
-
-            /* Enumerates control messages that can be sent into the thread. */
-            enum ControlMessage {
-                /* Stop the thread. */
-                THREAD_STOP
-            };
-
-            Condition mSetup;
+    /* Values returned from the Select method of this class. */
+    enum SelectRes {
+      /* A timeout has occurred. */
+      TIMEOUT,
+      /* Data are available for read on the provided FD. */
+      READY,
+      /* Thread exit request has been received. */
+      EXIT_THREAD,
+      /* An error has occurred. */
+      ERROR
     };
 
-    /* Worker thread accessor. */
-    inline WorkerThread* getWorkerThread() const
-    {
-        return mWorkerThread.get();
-    }
+    /* Select on an FD event, keeping in mind thread exit message.
+     * Param:
+     *  fd - File descriptor on which to wait for an event. This
+     *      parameter may be negative. If it is negative this method will
+     *      only wait on a control message to the thread.
+     *  timeout - Timeout in microseconds. 0 indicates no timeout (wait
+     *      forever).
+     * Return:
+     *  See SelectRes enum comments.
+     */
+    SelectRes Select(int fd, int timeout);
 
     /****************************************************************************
-     * Data members
+     * Private API
      ***************************************************************************/
 
-protected:
-    /* Locks this instance for parameters, state, etc. change. */
-    Mutex                       mObjectLock;
+   private:
+    /* Implements abstract method of the base Thread class. */
+    bool threadLoop() {
+      /* Simply dispatch the call to the containing camera device. */
+      if (mCameraDevice->inWorkerThread()) {
+        /* Respect "one burst" parameter (see startThread). */
+        return !mOneBurst;
+      } else {
+        return false;
+      }
+    }
 
-    /* Worker thread that is used in frame capturing. */
-    sp<WorkerThread>            mWorkerThread;
+    /* Containing camera device object. */
+    EmulatedCameraDevice* mCameraDevice;
 
-    /* Timestamp of the current frame. */
-    nsecs_t                     mCurFrameTimestamp;
+    /* FD that is used to send control messages into the thread. */
+    int mThreadControl;
 
-    /* Emulated camera object containing this instance. */
-    EmulatedCamera*             mCameraHAL;
+    /* FD that thread uses to receive control messages. */
+    int mControlFD;
 
-    /* Framebuffer containing the current frame. */
-    uint8_t*                    mCurrentFrame;
+    /* Controls number of times the thread loop runs.
+     * See startThread for more information. */
+    bool mOneBurst;
 
-    /*
-     * Framebuffer properties.
-     */
-
-    /* Byte size of the framebuffer. */
-    size_t                      mFrameBufferSize;
-
-    /* Original pixel format (one of the V4L2_PIX_FMT_XXX values, as defined in
-     * bionic/libc/kernel/common/linux/videodev2.h */
-    uint32_t                    mPixelFormat;
-
-    /* Frame width */
-    int                         mFrameWidth;
-
-    /* Frame height */
-    int                         mFrameHeight;
-
-    /* Total number of pixels */
-    int                         mTotalPixels;
-
-    /* Requested FPS rate */
-    int                         mTargetFps;
-
-    /* Exposure compensation value */
-    float                       mExposureCompensation;
-
-    float*                      mWhiteBalanceScale;
-
-    bool                        mIsFocusing;
-
-    DefaultKeyedVector<String8, float*>      mSupportedWhiteBalanceScale;
-
-    /* Defines possible states of the emulated camera device object.
-     */
-    enum EmulatedCameraDeviceState {
-        /* Object has been constructed. */
-        ECDS_CONSTRUCTED,
-        /* Object has been initialized. */
-        ECDS_INITIALIZED,
-        /* Object has been connected to the physical device. */
-        ECDS_CONNECTED,
-        /* Camera device has been started. */
-        ECDS_STARTED,
+    /* Enumerates control messages that can be sent into the thread. */
+    enum ControlMessage {
+      /* Stop the thread. */
+      THREAD_STOP
     };
 
-    /* Object state. */
-    EmulatedCameraDeviceState   mState;
+    Condition mSetup;
+  };
+
+  /* Worker thread accessor. */
+  inline WorkerThread* getWorkerThread() const { return mWorkerThread.get(); }
+
+  /****************************************************************************
+   * Data members
+   ***************************************************************************/
+
+ protected:
+  /* Locks this instance for parameters, state, etc. change. */
+  Mutex mObjectLock;
+
+  /* Worker thread that is used in frame capturing. */
+  sp<WorkerThread> mWorkerThread;
+
+  /* Timestamp of the current frame. */
+  nsecs_t mCurFrameTimestamp;
+
+  /* Emulated camera object containing this instance. */
+  EmulatedCamera* mCameraHAL;
+
+  /* Framebuffer containing the current frame. */
+  uint8_t* mCurrentFrame;
+
+  /*
+   * Framebuffer properties.
+   */
+
+  /* Byte size of the framebuffer. */
+  size_t mFrameBufferSize;
+
+  /* Original pixel format (one of the V4L2_PIX_FMT_XXX values, as defined in
+   * bionic/libc/kernel/common/linux/videodev2.h) */
+  uint32_t mPixelFormat;
+
+  /* Frame width */
+  int mFrameWidth;
+
+  /* Frame height */
+  int mFrameHeight;
+
+  /* Total number of pixels */
+  int mTotalPixels;
+
+  /* Requested FPS rate */
+  int mTargetFps;
+
+  /* Exposure compensation value */
+  float mExposureCompensation;
+
+  float* mWhiteBalanceScale;
+
+  bool mIsFocusing;
+
+  DefaultKeyedVector<String8, float*> mSupportedWhiteBalanceScale;
+
+  /* Defines possible states of the emulated camera device object.
+   */
+  enum EmulatedCameraDeviceState {
+    /* Object has been constructed. */
+    ECDS_CONSTRUCTED,
+    /* Object has been initialized. */
+    ECDS_INITIALIZED,
+    /* Object has been connected to the physical device. */
+    ECDS_CONNECTED,
+    /* Camera device has been started. */
+    ECDS_STARTED,
+  };
+
+  /* Object state. */
+  EmulatedCameraDeviceState mState;
 };
 
 }; /* namespace android */
 
-#endif  /* HW_EMULATOR_CAMERA_EMULATED_CAMERA_DEVICE_H */
+#endif /* HW_EMULATOR_CAMERA_EMULATED_CAMERA_DEVICE_H */
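
Note: the worker-thread contract in EmulatedCameraDevice.h is easier to see
in isolation. The following is a minimal sketch of the same pattern, not the
HAL code itself: names are hypothetical, std::thread stands in for
android::Thread, and the control pipe (mThreadControl/mControlFD) that the
real Select() uses for prompt shutdown is omitted.

    #include <atomic>
    #include <thread>

    // Stand-in for the device's frame-delivery hook (inWorkerThread()):
    // return true to be called again, false to end the loop.
    static bool DeliverOneFrame() { return true; }

    class WorkerLoop {
     public:
      // Mirrors startThread(one_burst): run the routine once, or until
      // Stop() is called.
      void Start(bool one_burst) {
        running_ = true;
        thread_ = std::thread([this, one_burst] {
          while (running_ && DeliverOneFrame() && !one_burst) {
          }
        });
      }

      // Mirrors stopThread(): always waits for the thread to terminate.
      void Stop() {
        running_ = false;
        if (thread_.joinable()) thread_.join();
      }

     private:
      std::atomic<bool> running_{false};
      std::thread thread_;
    };

As in threadLoop() above, a 'false' from the delivery hook ends the loop
regardless of one_burst.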
diff --git a/guest/hals/camera/EmulatedCameraFactory.cpp b/guest/hals/camera/EmulatedCameraFactory.cpp
index 00a3c99..f9ada4b 100644
--- a/guest/hals/camera/EmulatedCameraFactory.cpp
+++ b/guest/hals/camera/EmulatedCameraFactory.cpp
@@ -23,12 +23,12 @@
 #define LOG_TAG "EmulatedCamera_Factory"
 #include <cutils/log.h>
 #include <cutils/properties.h>
-#include "guest/libs/platform_support/api_level_fixes.h"
 #include "EmulatedFakeCamera.h"
+#include "guest/libs/platform_support/api_level_fixes.h"
 
 #if VSOC_PLATFORM_SDK_AFTER(J_MR2)
-#include "EmulatedFakeCamera2.h"
 #include "EmulatedCameraHotplugThread.h"
+#include "EmulatedFakeCamera2.h"
 #endif
 
 #if VSOC_PLATFORM_SDK_AFTER(L_MR1)
@@ -41,108 +41,106 @@
 
 namespace android {
 EmulatedCameraFactory& EmulatedCameraFactory::Instance() {
-    static EmulatedCameraFactory* factory = new EmulatedCameraFactory;
-    return *factory;
+  static EmulatedCameraFactory* factory = new EmulatedCameraFactory;
+  return *factory;
 }
 
 EmulatedCameraFactory::EmulatedCameraFactory()
 #if VSOC_PLATFORM_SDK_AFTER(J_MR2)
-        : mCallbacks(NULL)
+    : mCallbacks(NULL)
 #endif
 {
-    mCameraConfiguration.Init();
-    const std::vector<cvd::CameraDefinition>& cameras =
-        mCameraConfiguration.cameras();
-    for (size_t camera_index = 0;
-         camera_index < cameras.size();
-         ++camera_index) {
-        mCameraDefinitions.push(cameras[camera_index]);
-        /* Reserve a spot for camera, but don't create just yet. */
-        mEmulatedCameras.push(NULL);
-    }
+  mCameraConfiguration.Init();
+  const std::vector<cvd::CameraDefinition>& cameras =
+      mCameraConfiguration.cameras();
+  for (size_t camera_index = 0; camera_index < cameras.size(); ++camera_index) {
+    mCameraDefinitions.push(cameras[camera_index]);
+    /* Reserve a spot for the camera, but don't create it just yet. */
+    mEmulatedCameras.push(NULL);
+  }
 
-    ALOGV("%d cameras are being emulated.", getEmulatedCameraNum());
+  ALOGV("%d cameras are being emulated.", getEmulatedCameraNum());
 
 #if VSOC_PLATFORM_SDK_AFTER(J_MR2)
-    /* Create hotplug thread */
-    {
-        mHotplugThread = new EmulatedCameraHotplugThread(getEmulatedCameraNum());
-        mHotplugThread->run("EmulatedCameraHotplugThread");
-    }
+  /* Create hotplug thread */
+  {
+    mHotplugThread = new EmulatedCameraHotplugThread(getEmulatedCameraNum());
+    mHotplugThread->run("EmulatedCameraHotplugThread");
+  }
 #endif
 }
 
-EmulatedBaseCamera* EmulatedCameraFactory::getOrCreateFakeCamera(size_t cameraId) {
-    ::cvd::LockGuard< ::cvd::Mutex > lock(mEmulatedCamerasMutex);
+EmulatedBaseCamera* EmulatedCameraFactory::getOrCreateFakeCamera(
+    size_t cameraId) {
+  ::cvd::LockGuard< ::cvd::Mutex> lock(mEmulatedCamerasMutex);
 
-    if (cameraId >= getEmulatedCameraNum()) {
-        ALOGE("%s: Invalid camera ID: %d", __FUNCTION__, cameraId);
-        return NULL;
-    }
+  if (cameraId >= getEmulatedCameraNum()) {
+    ALOGE("%s: Invalid camera ID: %d", __FUNCTION__, cameraId);
+    return NULL;
+  }
 
-    if (mEmulatedCameras[cameraId] != NULL) {
-        return mEmulatedCameras[cameraId];
-    }
+  if (mEmulatedCameras[cameraId] != NULL) {
+    return mEmulatedCameras[cameraId];
+  }
 
-    const cvd::CameraDefinition& definition = mCameraDefinitions[cameraId];
-    bool is_back_facing =
-            (definition.orientation == cvd::CameraDefinition::kBack);
+  const cvd::CameraDefinition& definition = mCameraDefinitions[cameraId];
+  bool is_back_facing =
+      (definition.orientation == cvd::CameraDefinition::kBack);
 
-    EmulatedBaseCamera* camera;
-    /* Create, and initialize the fake camera */
-    switch (definition.hal_version) {
-        case cvd::CameraDefinition::kHalV1:
-            camera = new EmulatedFakeCamera(cameraId, is_back_facing,
-                                            &HAL_MODULE_INFO_SYM.common);
-            break;
+  EmulatedBaseCamera* camera;
+  /* Create and initialize the fake camera. */
+  switch (definition.hal_version) {
+    case cvd::CameraDefinition::kHalV1:
+      camera = new EmulatedFakeCamera(cameraId, is_back_facing,
+                                      &HAL_MODULE_INFO_SYM.common);
+      break;
 #if VSOC_PLATFORM_SDK_AFTER(J_MR2)
-        case cvd::CameraDefinition::kHalV2:
-            camera = new EmulatedFakeCamera2(cameraId, is_back_facing,
-                                             &HAL_MODULE_INFO_SYM.common);
-            break;
+    case cvd::CameraDefinition::kHalV2:
+      camera = new EmulatedFakeCamera2(cameraId, is_back_facing,
+                                       &HAL_MODULE_INFO_SYM.common);
+      break;
 #endif
 #if VSOC_PLATFORM_SDK_AFTER(L_MR1)
-        case cvd::CameraDefinition::kHalV3:
-            camera = new EmulatedFakeCamera3(cameraId, is_back_facing,
-                                        &HAL_MODULE_INFO_SYM.common);
-            break;
+    case cvd::CameraDefinition::kHalV3:
+      camera = new EmulatedFakeCamera3(cameraId, is_back_facing,
+                                       &HAL_MODULE_INFO_SYM.common);
+      break;
 #endif
-        default:
-            ALOGE("%s: Unsupported camera hal version requested: %d",
-                  __FUNCTION__, definition.hal_version);
-            return NULL;
-    }
+    default:
+      ALOGE("%s: Unsupported camera hal version requested: %d", __FUNCTION__,
+            definition.hal_version);
+      return NULL;
+  }
 
-    ALOGI("%s: Camera device %d hal version is %d", __FUNCTION__,
-          cameraId, definition.hal_version);
-    int res = camera->Initialize(definition);
+  ALOGI("%s: Camera device %d hal version is %d", __FUNCTION__, cameraId,
+        definition.hal_version);
+  int res = camera->Initialize(definition);
 
-    if (res != NO_ERROR) {
-        ALOGE("%s: Unable to intialize camera %d: %s (%d)",
-              __FUNCTION__, cameraId, strerror(-res), res);
-        delete camera;
-        return NULL;
-    }
+  if (res != NO_ERROR) {
+    ALOGE("%s: Unable to intialize camera %d: %s (%d)", __FUNCTION__, cameraId,
+          strerror(-res), res);
+    delete camera;
+    return NULL;
+  }
 
-    ALOGI("%s: Inserting camera", __FUNCTION__);
-    mEmulatedCameras.replaceAt(camera, cameraId);
-    ALOGI("%s: Done", __FUNCTION__);
-    return camera;
+  ALOGI("%s: Inserting camera", __FUNCTION__);
+  mEmulatedCameras.replaceAt(camera, cameraId);
+  ALOGI("%s: Done", __FUNCTION__);
+  return camera;
 }
 
-EmulatedCameraFactory::~EmulatedCameraFactory()
-{
-    for (size_t n = 0; n < mEmulatedCameras.size(); n++) {
-        if (mEmulatedCameras[n] != NULL) {
-            delete mEmulatedCameras[n];
-        }
+EmulatedCameraFactory::~EmulatedCameraFactory() {
+  for (size_t n = 0; n < mEmulatedCameras.size(); n++) {
+    if (mEmulatedCameras[n] != NULL) {
+      delete mEmulatedCameras[n];
     }
+  }
 
 #if VSOC_PLATFORM_SDK_AFTER(J_MR2)
-    if (mHotplugThread != NULL) {
-        mHotplugThread->requestExit();
-        mHotplugThread->join();
-    }
+  if (mHotplugThread != NULL) {
+    mHotplugThread->requestExit();
+    mHotplugThread->join();
+  }
 #endif
 }
 
@@ -154,53 +152,52 @@
  *
  ***************************************************************************/
 
-int EmulatedCameraFactory::cameraDeviceOpen(int camera_id, hw_device_t** device)
-{
-    ALOGV("%s: id = %d", __FUNCTION__, camera_id);
+int EmulatedCameraFactory::cameraDeviceOpen(int camera_id,
+                                            hw_device_t** device) {
+  ALOGV("%s: id = %d", __FUNCTION__, camera_id);
 
-    *device = NULL;
+  *device = NULL;
 
-    EmulatedBaseCamera* camera = getOrCreateFakeCamera(camera_id);
-    if (camera == NULL) return -EINVAL;
+  EmulatedBaseCamera* camera = getOrCreateFakeCamera(camera_id);
+  if (camera == NULL) return -EINVAL;
 
-    return camera->connectCamera(device);
+  return camera->connectCamera(device);
 }
 
-int EmulatedCameraFactory::getCameraInfo(int camera_id, struct camera_info* info)
-{
-    ALOGV("%s: id = %d", __FUNCTION__, camera_id);
+int EmulatedCameraFactory::getCameraInfo(int camera_id,
+                                         struct camera_info* info) {
+  ALOGV("%s: id = %d", __FUNCTION__, camera_id);
 
-    EmulatedBaseCamera* camera = getOrCreateFakeCamera(camera_id);
-    if (camera == NULL) return -EINVAL;
+  EmulatedBaseCamera* camera = getOrCreateFakeCamera(camera_id);
+  if (camera == NULL) return -EINVAL;
 
-    return camera->getCameraInfo(info);
+  return camera->getCameraInfo(info);
 }
 
 #if VSOC_PLATFORM_SDK_AFTER(J_MR2)
 int EmulatedCameraFactory::setCallbacks(
-        const camera_module_callbacks_t *callbacks)
-{
-    ALOGV("%s: callbacks = %p", __FUNCTION__, callbacks);
+    const camera_module_callbacks_t* callbacks) {
+  ALOGV("%s: callbacks = %p", __FUNCTION__, callbacks);
 
-    mCallbacks = callbacks;
+  mCallbacks = callbacks;
 
-    return OK;
+  return OK;
 }
 
 void EmulatedCameraFactory::getVendorTagOps(vendor_tag_ops_t* ops) {
-    ALOGV("%s: ops = %p", __FUNCTION__, ops);
+  ALOGV("%s: ops = %p", __FUNCTION__, ops);
 
-    // No vendor tags defined for emulator yet, so not touching ops
+  // No vendor tags defined for emulator yet, so not touching ops
 }
 #endif
 
 int EmulatedCameraFactory::setTorchMode(const char* camera_id, bool enabled) {
-    ALOGV("%s: camera_id = %s, enabled =%d", __FUNCTION__, camera_id, enabled);
+  ALOGV("%s: camera_id = %s, enabled =%d", __FUNCTION__, camera_id, enabled);
 
-    EmulatedBaseCamera* camera = getOrCreateFakeCamera(atoi(camera_id));
-    if (camera == NULL) return -EINVAL;
+  EmulatedBaseCamera* camera = getOrCreateFakeCamera(atoi(camera_id));
+  if (camera == NULL) return -EINVAL;
 
-    return camera->setTorchMode(enabled);
+  return camera->setTorchMode(enabled);
 }
 
 /****************************************************************************
@@ -208,59 +205,54 @@
  ***************************************************************************/
 
 int EmulatedCameraFactory::device_open(const hw_module_t* module,
-                                       const char* name,
-                                       hw_device_t** device)
-{
-    /*
-     * Simply verify the parameters, and dispatch the call inside the
-     * EmulatedCameraFactory instance.
-     */
+                                       const char* name, hw_device_t** device) {
+  /*
+   * Simply verify the parameters, and dispatch the call inside the
+   * EmulatedCameraFactory instance.
+   */
 
-    if (module != &HAL_MODULE_INFO_SYM.common) {
-        ALOGE("%s: Invalid module %p expected %p",
-             __FUNCTION__, module, &HAL_MODULE_INFO_SYM.common);
-        return -EINVAL;
-    }
-    if (name == NULL) {
-        ALOGE("%s: NULL name is not expected here", __FUNCTION__);
-        return -EINVAL;
-    }
+  if (module != &HAL_MODULE_INFO_SYM.common) {
+    ALOGE("%s: Invalid module %p expected %p", __FUNCTION__, module,
+          &HAL_MODULE_INFO_SYM.common);
+    return -EINVAL;
+  }
+  if (name == NULL) {
+    ALOGE("%s: NULL name is not expected here", __FUNCTION__);
+    return -EINVAL;
+  }
 
-    return EmulatedCameraFactory::Instance().cameraDeviceOpen(atoi(name), device);
+  return EmulatedCameraFactory::Instance().cameraDeviceOpen(atoi(name), device);
 }
 
-int EmulatedCameraFactory::get_number_of_cameras(void)
-{
-    return EmulatedCameraFactory::Instance().getEmulatedCameraNum();
+int EmulatedCameraFactory::get_number_of_cameras(void) {
+  return EmulatedCameraFactory::Instance().getEmulatedCameraNum();
 }
 
 int EmulatedCameraFactory::get_camera_info(int camera_id,
-                                           struct camera_info* info)
-{
-    return EmulatedCameraFactory::Instance().getCameraInfo(camera_id, info);
+                                           struct camera_info* info) {
+  return EmulatedCameraFactory::Instance().getCameraInfo(camera_id, info);
 }
 
 #if VSOC_PLATFORM_SDK_AFTER(J_MR2)
 int EmulatedCameraFactory::set_callbacks(
-        const camera_module_callbacks_t *callbacks)
-{
-    return EmulatedCameraFactory::Instance().setCallbacks(callbacks);
+    const camera_module_callbacks_t* callbacks) {
+  return EmulatedCameraFactory::Instance().setCallbacks(callbacks);
 }
 
-void EmulatedCameraFactory::get_vendor_tag_ops(vendor_tag_ops_t* ops)
-{
-    EmulatedCameraFactory::Instance().getVendorTagOps(ops);
+void EmulatedCameraFactory::get_vendor_tag_ops(vendor_tag_ops_t* ops) {
+  EmulatedCameraFactory::Instance().getVendorTagOps(ops);
 }
 #endif
 
 int EmulatedCameraFactory::open_legacy(const struct hw_module_t* module,
-        const char* id, uint32_t halVersion, struct hw_device_t** device) {
-    // Not supporting legacy open
-    return -ENOSYS;
+                                       const char* id, uint32_t halVersion,
+                                       struct hw_device_t** device) {
+  // Not supporting legacy open
+  return -ENOSYS;
 }
 
 int EmulatedCameraFactory::set_torch_mode(const char* camera_id, bool enabled) {
-    return EmulatedCameraFactory::Instance().setTorchMode(camera_id, enabled);
+  return EmulatedCameraFactory::Instance().setTorchMode(camera_id, enabled);
 }
 
 /********************************************************************************
@@ -268,54 +260,52 @@
  *******************************************************************************/
 
 void EmulatedCameraFactory::onStatusChanged(int cameraId, int newStatus) {
+  EmulatedBaseCamera* cam = getOrCreateFakeCamera(cameraId);
+  if (!cam) {
+    ALOGE("%s: Invalid camera ID %d", __FUNCTION__, cameraId);
+    return;
+  }
 
-    EmulatedBaseCamera *cam = getOrCreateFakeCamera(cameraId);
-    if (!cam) {
-        ALOGE("%s: Invalid camera ID %d", __FUNCTION__, cameraId);
-        return;
-    }
-
-    /**
-     * (Order is important)
-     * Send the callback first to framework, THEN close the camera.
-     */
+  /**
+   * (Order is important)
+   * Send the callback to the framework first, THEN close the camera.
+   */
 
 #if VSOC_PLATFORM_SDK_AFTER(J_MR2)
-    if (newStatus == cam->getHotplugStatus()) {
-        ALOGW("%s: Ignoring transition to the same status", __FUNCTION__);
-        return;
-    }
+  if (newStatus == cam->getHotplugStatus()) {
+    ALOGW("%s: Ignoring transition to the same status", __FUNCTION__);
+    return;
+  }
 
-    const camera_module_callbacks_t* cb = mCallbacks;
-    if (cb != NULL && cb->camera_device_status_change != NULL) {
-        cb->camera_device_status_change(cb, cameraId, newStatus);
-    }
+  const camera_module_callbacks_t* cb = mCallbacks;
+  if (cb != NULL && cb->camera_device_status_change != NULL) {
+    cb->camera_device_status_change(cb, cameraId, newStatus);
+  }
 
-    if (newStatus == CAMERA_DEVICE_STATUS_NOT_PRESENT) {
-        cam->unplugCamera();
-    } else if (newStatus == CAMERA_DEVICE_STATUS_PRESENT) {
-        cam->plugCamera();
-    }
+  if (newStatus == CAMERA_DEVICE_STATUS_NOT_PRESENT) {
+    cam->unplugCamera();
+  } else if (newStatus == CAMERA_DEVICE_STATUS_PRESENT) {
+    cam->plugCamera();
+  }
 #endif
-
 }
 
-void EmulatedCameraFactory::onTorchModeStatusChanged(int cameraId, int newStatus) {
-    EmulatedBaseCamera *cam = getOrCreateFakeCamera(cameraId);
-    if (!cam) {
-        ALOGE("%s: Invalid camera ID %d", __FUNCTION__, cameraId);
-        return;
-    }
+void EmulatedCameraFactory::onTorchModeStatusChanged(int cameraId,
+                                                     int newStatus) {
+  EmulatedBaseCamera* cam = getOrCreateFakeCamera(cameraId);
+  if (!cam) {
+    ALOGE("%s: Invalid camera ID %d", __FUNCTION__, cameraId);
+    return;
+  }
 
 #if VSOC_PLATFORM_SDK_AFTER(L_MR1)
-    const camera_module_callbacks_t* cb = mCallbacks;
-    if (cb != NULL && cb->torch_mode_status_change != NULL) {
-        char id[10];
-        sprintf(id, "%d", cameraId);
-        cb->torch_mode_status_change(cb, id, newStatus);
-    }
+  const camera_module_callbacks_t* cb = mCallbacks;
+  if (cb != NULL && cb->torch_mode_status_change != NULL) {
+    char id[10];
+    sprintf(id, "%d", cameraId);
+    cb->torch_mode_status_change(cb, id, newStatus);
+  }
 #endif
-
 }
 
 /********************************************************************************
@@ -324,7 +314,6 @@
 
 /* Entry point for camera HAL API. */
 struct hw_module_methods_t EmulatedCameraFactory::mCameraModuleMethods = {
-    VSOC_STATIC_INITIALIZER(open) EmulatedCameraFactory::device_open
-};
+    VSOC_STATIC_INITIALIZER(open) EmulatedCameraFactory::device_open};
 
 }; /* namespace android */
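
Note: behind the reformatting, getOrCreateFakeCamera() is a create-on-first-use
table guarded by mEmulatedCamerasMutex, with one NULL slot pre-reserved per
camera definition. A reduced sketch of that shape, with hypothetical types and
std::mutex in place of cvd::Mutex:

    #include <cstddef>
    #include <mutex>
    #include <vector>

    struct FakeCamera {};  // Stand-in for EmulatedBaseCamera.

    class Factory {
     public:
      explicit Factory(size_t count) : cameras_(count, nullptr) {}

      // Returns the camera for 'id', creating it on first use;
      // returns NULL for an invalid ID.
      FakeCamera* GetOrCreate(size_t id) {
        std::lock_guard<std::mutex> lock(mutex_);
        if (id >= cameras_.size()) return nullptr;
        if (cameras_[id] == nullptr) {
          cameras_[id] = new FakeCamera();  // create just-in-time
        }
        return cameras_[id];
      }

     private:
      std::mutex mutex_;
      std::vector<FakeCamera*> cameras_;  // one slot per definition
    };

In the real method a failed Initialize() deletes the camera and leaves the
slot NULL, so a later call can retry; the sketch omits that error path.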
diff --git a/guest/hals/camera/EmulatedCameraFactory.h b/guest/hals/camera/EmulatedCameraFactory.h
index 64588fd..6eb122c 100644
--- a/guest/hals/camera/EmulatedCameraFactory.h
+++ b/guest/hals/camera/EmulatedCameraFactory.h
@@ -20,10 +20,10 @@
 #include <utils/RefBase.h>
 
 #include <utils/Vector.h>
-#include "common/libs/threads/cuttlefish_thread.h"
-#include "guest/libs/platform_support/api_level_fixes.h"
 #include "CameraConfiguration.h"
 #include "EmulatedBaseCamera.h"
+#include "common/libs/threads/cuttlefish_thread.h"
+#include "guest/libs/platform_support/api_level_fixes.h"
 
 namespace android {
 
@@ -45,151 +45,152 @@
  * emulator is always created, so there is always at least one camera that is
  * available.
  *
- * Instance of this class is also used as the entry point for the camera HAL API,
- * including:
+ * An instance of this class is also used as the entry point for the camera
+ * HAL API, including:
  *  - hw_module_methods_t::open entry point
  *  - camera_module_t::get_number_of_cameras entry point
  *  - camera_module_t::get_camera_info entry point
  *
  */
 class EmulatedCameraFactory {
-public:
-    /* Constructs EmulatedCameraFactory instance.
-     * In this constructor the factory will create and initialize a list of
-     * emulated cameras. All errors that occur on this constructor are reported
-     * via mConstructedOK data member of this class.
-     */
-    EmulatedCameraFactory();
+ public:
+  /* Constructs EmulatedCameraFactory instance.
+   * In this constructor the factory will create and initialize a list of
+   * emulated cameras. All errors that occur in this constructor are reported
+   * via the mConstructedOK data member of this class.
+   */
+  EmulatedCameraFactory();
 
-    /* Destructs EmulatedCameraFactory instance. */
-    ~EmulatedCameraFactory();
+  /* Destructs EmulatedCameraFactory instance. */
+  ~EmulatedCameraFactory();
 
-    /****************************************************************************
-     * Camera HAL API handlers.
-     ***************************************************************************/
+  /****************************************************************************
+   * Camera HAL API handlers.
+   ***************************************************************************/
 
-public:
-    /* Returns a (singleton) instance of the EmulatedCameraFactory.
-     */
-    static EmulatedCameraFactory& Instance();
+ public:
+  /* Returns a (singleton) instance of the EmulatedCameraFactory.
+   */
+  static EmulatedCameraFactory& Instance();
 
-    /* Opens (connects to) a camera device.
-     * This method is called in response to hw_module_methods_t::open callback.
-     */
-    int cameraDeviceOpen(int camera_id, hw_device_t** device);
+  /* Opens (connects to) a camera device.
+   * This method is called in response to hw_module_methods_t::open callback.
+   */
+  int cameraDeviceOpen(int camera_id, hw_device_t** device);
 
-    /* Gets emulated camera information.
-     * This method is called in response to camera_module_t::get_camera_info callback.
-     */
-    int getCameraInfo(int camera_id, struct camera_info *info);
+  /* Gets emulated camera information.
+   * This method is called in response to camera_module_t::get_camera_info
+   * callback.
+   */
+  int getCameraInfo(int camera_id, struct camera_info* info);
 
 #if VSOC_PLATFORM_SDK_AFTER(J_MR2)
-    /* Sets emulated camera callbacks.
-     * This method is called in response to camera_module_t::set_callbacks callback.
-     */
-    int setCallbacks(const camera_module_callbacks_t *callbacks);
+  /* Sets emulated camera callbacks.
+   * This method is called in response to camera_module_t::set_callbacks
+   * callback.
+   */
+  int setCallbacks(const camera_module_callbacks_t* callbacks);
 
-    /* Fill in vendor tags for the module
-     * This method is called in response to camera_module_t::get_vendor_tag_ops callback.
-     */
-    void getVendorTagOps(vendor_tag_ops_t* ops);
+  /* Fill in vendor tags for the module
+   * This method is called in response to camera_module_t::get_vendor_tag_ops
+   * callback.
+   */
+  void getVendorTagOps(vendor_tag_ops_t* ops);
 #endif
 
-    int setTorchMode(const char* camera_id, bool enabled);
+  int setTorchMode(const char* camera_id, bool enabled);
 
-    /****************************************************************************
-     * Camera HAL API callbacks.
-     ***************************************************************************/
+  /****************************************************************************
+   * Camera HAL API callbacks.
+   ***************************************************************************/
 
-public:
-    /* camera_module_t::get_number_of_cameras callback entry point. */
-    static int get_number_of_cameras(void);
+ public:
+  /* camera_module_t::get_number_of_cameras callback entry point. */
+  static int get_number_of_cameras(void);
 
-    /* camera_module_t::get_camera_info callback entry point. */
-    static int get_camera_info(int camera_id, struct camera_info *info);
+  /* camera_module_t::get_camera_info callback entry point. */
+  static int get_camera_info(int camera_id, struct camera_info* info);
 
 #if VSOC_PLATFORM_SDK_AFTER(J_MR2)
-    /* camera_module_t::set_callbacks callback entry point. */
-    static int set_callbacks(const camera_module_callbacks_t *callbacks);
+  /* camera_module_t::set_callbacks callback entry point. */
+  static int set_callbacks(const camera_module_callbacks_t* callbacks);
 
-    /* camera_module_t::get_vendor_tag_ops callback entry point */
-    static void get_vendor_tag_ops(vendor_tag_ops_t* ops);
+  /* camera_module_t::get_vendor_tag_ops callback entry point */
+  static void get_vendor_tag_ops(vendor_tag_ops_t* ops);
 #endif
 
-    /* camera_module_t::open_legacy callback entry point */
-    static int open_legacy(const struct hw_module_t* module, const char* id,
-            uint32_t halVersion, struct hw_device_t** device);
+  /* camera_module_t::open_legacy callback entry point */
+  static int open_legacy(const struct hw_module_t* module, const char* id,
+                         uint32_t halVersion, struct hw_device_t** device);
 
-    static int set_torch_mode(const char* camera_id, bool enabled);
+  static int set_torch_mode(const char* camera_id, bool enabled);
 
-private:
-    /* hw_module_methods_t::open callback entry point. */
-    static int device_open(const hw_module_t* module,
-                           const char* name,
-                           hw_device_t** device);
+ private:
+  /* hw_module_methods_t::open callback entry point. */
+  static int device_open(const hw_module_t* module, const char* name,
+                         hw_device_t** device);
 
-    /****************************************************************************
-     * Public API.
-     ***************************************************************************/
+  /****************************************************************************
+   * Public API.
+   ***************************************************************************/
 
-public:
+ public:
+  /* Gets fake camera orientation. */
+  int getFakeCameraOrientation() {
+    /* TODO: Have a boot property that controls that. */
+    return 90;
+  }
 
-    /* Gets fake camera orientation. */
-    int getFakeCameraOrientation() {
-        /* TODO: Have a boot property that controls that. */
-        return 90;
-    }
+  /* Gets number of emulated cameras.
+   */
+  inline size_t getEmulatedCameraNum() const {
+    return mCameraDefinitions.size();
+  }
 
-    /* Gets number of emulated cameras.
-     */
-    inline size_t getEmulatedCameraNum() const {
-        return mCameraDefinitions.size();
-    }
+  void onStatusChanged(int cameraId, int newStatus);
 
-    void onStatusChanged(int cameraId, int newStatus);
+  void onTorchModeStatusChanged(int cameraId, int newStatus);
 
-    void onTorchModeStatusChanged(int cameraId, int newStatus);
+  /****************************************************************************
+   * Private API
+   ***************************************************************************/
 
-    /****************************************************************************
-     * Private API
-     ***************************************************************************/
+ private:
+  /* Creates a new fake camera, or returns an existing one, based on the
+   * camera definition found in mCameraDefinitions.
+   * Returns NULL if cameraId is not valid (i.e., not a valid index into
+   * mCameraDefinitions).
+   */
+  EmulatedBaseCamera* getOrCreateFakeCamera(size_t cameraId);
 
-private:
-    /* Create new or return existing fake camera based on camera definition
-     * found in mCameraDefinitions.
-     * Returns NULL if cameraId is not valid (= not a valid index of
-     * mCameraDefinitions)
-     */
-    EmulatedBaseCamera* getOrCreateFakeCamera(size_t cameraId);
+  /****************************************************************************
+   * Data members.
+   ***************************************************************************/
 
-    /****************************************************************************
-     * Data members.
-     ***************************************************************************/
+ private:
+  /* Array of cameras available for the emulation. */
+  Vector<EmulatedBaseCamera*> mEmulatedCameras;
 
-private:
-    /* Array of cameras available for the emulation. */
-    Vector<EmulatedBaseCamera*>  mEmulatedCameras;
-
-    /* Guards access to mEmulatedCameras. */
-    cvd::Mutex mEmulatedCamerasMutex;
+  /* Guards access to mEmulatedCameras. */
+  cvd::Mutex mEmulatedCamerasMutex;
 
 #if VSOC_PLATFORM_SDK_AFTER(J_MR2)
-    /* Camera callbacks (for status changing) */
-    const camera_module_callbacks_t* mCallbacks;
+  /* Camera callbacks (for status changing) */
+  const camera_module_callbacks_t* mCallbacks;
 
-    /* Hotplug thread (to call onStatusChanged) */
-    sp<EmulatedCameraHotplugThread> mHotplugThread;
+  /* Hotplug thread (to call onStatusChanged) */
+  sp<EmulatedCameraHotplugThread> mHotplugThread;
 #endif
 
-    /* Back- and front camera properties accessed from the vsoc device. */
-    cvd::CameraConfiguration mCameraConfiguration;
-    Vector<cvd::CameraDefinition> mCameraDefinitions;
+  /* Back- and front camera properties accessed from the vsoc device. */
+  cvd::CameraConfiguration mCameraConfiguration;
+  Vector<cvd::CameraDefinition> mCameraDefinitions;
 
-public:
-    /* Contains device open entry point, as required by HAL API. */
-    static struct hw_module_methods_t   mCameraModuleMethods;
+ public:
+  /* Contains device open entry point, as required by HAL API. */
+  static struct hw_module_methods_t mCameraModuleMethods;
 };
 
 }; /* namespace android */
 
-#endif  /* HW_EMULATOR_CAMERA_EMULATED_CAMERA_FACTORY_H */
+#endif /* HW_EMULATOR_CAMERA_EMULATED_CAMERA_FACTORY_H */
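
Note: for context on how the static entry points in this header are reached,
a client loads the module by CAMERA_HARDWARE_MODULE_ID and calls
hw_module_methods_t::open with the camera ID encoded as the device name,
which device_open() parses with atoi(). A hedged sketch, assuming only the
standard libhardware hw_get_module() call:

    #include <hardware/camera_common.h>
    #include <hardware/hardware.h>

    // Opens camera 0 through the module's open entry point.
    int OpenCameraZero(hw_device_t** device) {
      const hw_module_t* module = nullptr;
      int err = hw_get_module(CAMERA_HARDWARE_MODULE_ID, &module);
      if (err != 0) return err;
      // Lands in EmulatedCameraFactory::device_open, which dispatches to
      // Instance().cameraDeviceOpen(atoi("0"), device).
      return module->methods->open(module, "0", device);
    }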
diff --git a/guest/hals/camera/EmulatedCameraHal.cpp b/guest/hals/camera/EmulatedCameraHal.cpp
index 24c7c6d..fe3fe35 100644
--- a/guest/hals/camera/EmulatedCameraHal.cpp
+++ b/guest/hals/camera/EmulatedCameraHal.cpp
@@ -30,35 +30,44 @@
  * Required HAL header.
  */
 camera_module_t HAL_MODULE_INFO_SYM = {
-  VSOC_STATIC_INITIALIZER(common) {
-         VSOC_STATIC_INITIALIZER(tag)                HARDWARE_MODULE_TAG,
+  VSOC_STATIC_INITIALIZER(common){
+      VSOC_STATIC_INITIALIZER(tag) HARDWARE_MODULE_TAG,
 #if VSOC_PLATFORM_SDK_AFTER(L_MR1)
-         VSOC_STATIC_INITIALIZER(module_api_version) CAMERA_MODULE_API_VERSION_2_4,
+      VSOC_STATIC_INITIALIZER(module_api_version) CAMERA_MODULE_API_VERSION_2_4,
 #elif VSOC_PLATFORM_SDK_AFTER(K)
-         VSOC_STATIC_INITIALIZER(module_api_version) CAMERA_MODULE_API_VERSION_2_3,
+        VSOC_STATIC_INITIALIZER(module_api_version)
+            CAMERA_MODULE_API_VERSION_2_3,
 #elif VSOC_PLATFORM_SDK_AFTER(J_MR2)
-         VSOC_STATIC_INITIALIZER(module_api_version) CAMERA_MODULE_API_VERSION_2_2,
+        VSOC_STATIC_INITIALIZER(module_api_version)
+            CAMERA_MODULE_API_VERSION_2_2,
 #else
-         VSOC_STATIC_INITIALIZER(module_api_version) CAMERA_MODULE_API_VERSION_2_0,
+        VSOC_STATIC_INITIALIZER(module_api_version)
+            CAMERA_MODULE_API_VERSION_2_0,
 #endif
-         VSOC_STATIC_INITIALIZER(hal_api_version)    HARDWARE_HAL_API_VERSION,
-         VSOC_STATIC_INITIALIZER(id)                 CAMERA_HARDWARE_MODULE_ID,
-         VSOC_STATIC_INITIALIZER(name)               "Emulated Camera Module",
-         VSOC_STATIC_INITIALIZER(author)             "The Android Open Source Project",
-         VSOC_STATIC_INITIALIZER(methods)            &android::EmulatedCameraFactory::mCameraModuleMethods,
-         VSOC_STATIC_INITIALIZER(dso)                NULL,
-         VSOC_STATIC_INITIALIZER(reserved)           {0},
-    },
-    VSOC_STATIC_INITIALIZER(get_number_of_cameras)  android::EmulatedCameraFactory::get_number_of_cameras,
-    VSOC_STATIC_INITIALIZER(get_camera_info)        android::EmulatedCameraFactory::get_camera_info,
+      VSOC_STATIC_INITIALIZER(hal_api_version) HARDWARE_HAL_API_VERSION,
+      VSOC_STATIC_INITIALIZER(id) CAMERA_HARDWARE_MODULE_ID,
+      VSOC_STATIC_INITIALIZER(name) "Emulated Camera Module",
+      VSOC_STATIC_INITIALIZER(author) "The Android Open Source Project",
+      VSOC_STATIC_INITIALIZER(methods) &
+          android::EmulatedCameraFactory::mCameraModuleMethods,
+      VSOC_STATIC_INITIALIZER(dso) NULL,
+      VSOC_STATIC_INITIALIZER(reserved){0},
+  },
+  VSOC_STATIC_INITIALIZER(get_number_of_cameras)
+      android::EmulatedCameraFactory::get_number_of_cameras,
+  VSOC_STATIC_INITIALIZER(get_camera_info)
+      android::EmulatedCameraFactory::get_camera_info,
 #if VSOC_PLATFORM_SDK_AFTER(J_MR2)
-    VSOC_STATIC_INITIALIZER(set_callbacks)          android::EmulatedCameraFactory::set_callbacks,
-    VSOC_STATIC_INITIALIZER(get_vendor_tag_ops)     android::EmulatedCameraFactory::get_vendor_tag_ops,
+  VSOC_STATIC_INITIALIZER(set_callbacks)
+      android::EmulatedCameraFactory::set_callbacks,
+  VSOC_STATIC_INITIALIZER(get_vendor_tag_ops)
+      android::EmulatedCameraFactory::get_vendor_tag_ops,
 #endif
 #if VSOC_PLATFORM_SDK_AFTER(K)
-    VSOC_STATIC_INITIALIZER(open_legacy)            android::EmulatedCameraFactory::open_legacy,
+  VSOC_STATIC_INITIALIZER(open_legacy)
+      android::EmulatedCameraFactory::open_legacy,
 #endif
 #if VSOC_PLATFORM_SDK_AFTER(L_MR1)
-    set_torch_mode:         android::EmulatedCameraFactory::set_torch_mode,
+  set_torch_mode : android::EmulatedCameraFactory::set_torch_mode,
 #endif
 };
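
Note: the hotplug thread below watches one control file per camera
(FAKE_HOTPLUG_FILE plus a ".%d" suffix) with inotify's IN_CLOSE_WRITE, so a
hotplug event is simulated by writing '1' (present) or '0' (absent) and
closing the file. A minimal sketch of driving that path from a test, using
only POSIX I/O (the helper name is hypothetical):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    // Simulates a hotplug event for 'camera_id'. Closing the writable FD
    // is what fires the IN_CLOSE_WRITE watch.
    bool SetFakeHotplugState(int camera_id, bool plugged) {
      char path[128];
      snprintf(path, sizeof(path),
               "/data/misc/media/emulator.camera.hotplug.%d", camera_id);
      int fd = open(path, O_WRONLY | O_TRUNC);
      if (fd == -1) return false;
      bool ok = write(fd, plugged ? "1\n" : "0\n", 2) == 2;
      close(fd);
      return ok;
    }

Reading the file back, as the watch handler's readFile() does, cannot
re-trigger the watch, since IN_CLOSE_WRITE fires only for writable FDs.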
diff --git a/guest/hals/camera/EmulatedCameraHotplugThread.cpp b/guest/hals/camera/EmulatedCameraHotplugThread.cpp
index 459f037..6dd4580 100644
--- a/guest/hals/camera/EmulatedCameraHotplugThread.cpp
+++ b/guest/hals/camera/EmulatedCameraHotplugThread.cpp
@@ -17,353 +17,329 @@
 #define LOG_TAG "EmulatedCamera_HotplugThread"
 #include <cutils/log.h>
 
-#include <sys/types.h>
-#include <sys/stat.h>
 #include <fcntl.h>
 #include <sys/inotify.h>
+#include <sys/stat.h>
+#include <sys/types.h>
 
-#include "EmulatedCameraHotplugThread.h"
 #include "EmulatedCameraFactory.h"
+#include "EmulatedCameraHotplugThread.h"
 
 #define FAKE_HOTPLUG_FILE "/data/misc/media/emulator.camera.hotplug"
 
 #define EVENT_SIZE (sizeof(struct inotify_event))
-#define EVENT_BUF_LEN (1024*(EVENT_SIZE+16))
+#define EVENT_BUF_LEN (1024 * (EVENT_SIZE + 16))
 
 #define SubscriberInfo EmulatedCameraHotplugThread::SubscriberInfo
 
 namespace android {
 
 EmulatedCameraHotplugThread::EmulatedCameraHotplugThread(
-    size_t totalCameraCount) :
-        Thread(/*canCallJava*/false) {
+    size_t totalCameraCount)
+    : Thread(/*canCallJava*/ false) {
+  mRunning = true;
+  mInotifyFd = 0;
 
-    mRunning = true;
-    mInotifyFd = 0;
-
-    for (size_t id = 0; id < totalCameraCount; ++id) {
-        if (createFileIfNotExists(id)) {
-            mSubscribedCameraIds.push_back(id);
-        }
+  for (size_t id = 0; id < totalCameraCount; ++id) {
+    if (createFileIfNotExists(id)) {
+      mSubscribedCameraIds.push_back(id);
     }
+  }
 }
 
-EmulatedCameraHotplugThread::~EmulatedCameraHotplugThread() {
-}
+EmulatedCameraHotplugThread::~EmulatedCameraHotplugThread() {}
 
 status_t EmulatedCameraHotplugThread::requestExitAndWait() {
-    ALOGE("%s: Not implemented. Use requestExit + join instead",
-          __FUNCTION__);
-    return INVALID_OPERATION;
+  ALOGE("%s: Not implemented. Use requestExit + join instead", __FUNCTION__);
+  return INVALID_OPERATION;
 }
 
 void EmulatedCameraHotplugThread::requestExit() {
-    Mutex::Autolock al(mMutex);
+  Mutex::Autolock al(mMutex);
 
-    ALOGV("%s: Requesting thread exit", __FUNCTION__);
-    mRunning = false;
+  ALOGV("%s: Requesting thread exit", __FUNCTION__);
+  mRunning = false;
 
-    bool rmWatchFailed = false;
-    Vector<SubscriberInfo>::iterator it;
-    for (it = mSubscribers.begin(); it != mSubscribers.end(); ++it) {
+  bool rmWatchFailed = false;
+  Vector<SubscriberInfo>::iterator it;
+  for (it = mSubscribers.begin(); it != mSubscribers.end(); ++it) {
+    if (inotify_rm_watch(mInotifyFd, it->WatchID) == -1) {
+      ALOGE(
+          "%s: Could not remove watch for camID '%d',"
+          " error: '%s' (%d)",
+          __FUNCTION__, it->CameraID, strerror(errno), errno);
 
-        if (inotify_rm_watch(mInotifyFd, it->WatchID) == -1) {
-
-            ALOGE("%s: Could not remove watch for camID '%d',"
-                  " error: '%s' (%d)",
-                 __FUNCTION__, it->CameraID, strerror(errno),
-                 errno);
-
-            rmWatchFailed = true ;
-        } else {
-            ALOGV("%s: Removed watch for camID '%d'",
-                __FUNCTION__, it->CameraID);
-        }
+      rmWatchFailed = true;
+    } else {
+      ALOGV("%s: Removed watch for camID '%d'", __FUNCTION__, it->CameraID);
     }
+  }
 
-    if (rmWatchFailed) { // unlikely
-        // Give the thread a fighting chance to error out on the next
-        // read
-        if (close(mInotifyFd) == -1) {
-            ALOGE("%s: close failure error: '%s' (%d)",
-                 __FUNCTION__, strerror(errno), errno);
-        }
+  if (rmWatchFailed) {  // unlikely
+    // Give the thread a fighting chance to error out on the next
+    // read
+    if (close(mInotifyFd) == -1) {
+      ALOGE("%s: close failure error: '%s' (%d)", __FUNCTION__, strerror(errno),
+            errno);
     }
+  }
 
-    ALOGV("%s: Request exit complete.", __FUNCTION__);
+  ALOGV("%s: Request exit complete.", __FUNCTION__);
 }
 
 status_t EmulatedCameraHotplugThread::readyToRun() {
-    Mutex::Autolock al(mMutex);
+  Mutex::Autolock al(mMutex);
 
-    mInotifyFd = -1;
+  mInotifyFd = -1;
 
-    do {
-        ALOGV("%s: Initializing inotify", __FUNCTION__);
+  do {
+    ALOGV("%s: Initializing inotify", __FUNCTION__);
 
-        mInotifyFd = inotify_init();
-        if (mInotifyFd == -1) {
-            ALOGE("%s: inotify_init failure error: '%s' (%d)",
-                 __FUNCTION__, strerror(errno), errno);
-            mRunning = false;
-            break;
-        }
-
-        /**
-         * For each fake camera file, add a watch for when
-         * the file is closed (if it was written to)
-         */
-        Vector<int>::const_iterator it, end;
-        it = mSubscribedCameraIds.begin();
-        end = mSubscribedCameraIds.end();
-        for (; it != end; ++it) {
-            int cameraId = *it;
-            if (!addWatch(cameraId)) {
-                mRunning = false;
-                break;
-            }
-        }
-    } while(false);
-
-    if (!mRunning) {
-        status_t err = -errno;
-
-        if (mInotifyFd != -1) {
-            close(mInotifyFd);
-        }
-
-        return err;
+    mInotifyFd = inotify_init();
+    if (mInotifyFd == -1) {
+      ALOGE("%s: inotify_init failure error: '%s' (%d)", __FUNCTION__,
+            strerror(errno), errno);
+      mRunning = false;
+      break;
     }
 
-    return OK;
-}
-
-bool EmulatedCameraHotplugThread::threadLoop() {
-
-    // If requestExit was already called, mRunning will be false
-    while (mRunning) {
-        char buffer[EVENT_BUF_LEN];
-        int length = TEMP_FAILURE_RETRY(
-                        read(mInotifyFd, buffer, EVENT_BUF_LEN));
-
-        if (length < 0) {
-            ALOGE("%s: Error reading from inotify FD, error: '%s' (%d)",
-                 __FUNCTION__, strerror(errno),
-                 errno);
-            mRunning = false;
-            break;
-        }
-
-        ALOGV("%s: Read %d bytes from inotify FD", __FUNCTION__, length);
-
-        int i = 0;
-        while (i < length) {
-            inotify_event* event = (inotify_event*) &buffer[i];
-
-            if (event->mask & IN_IGNORED) {
-                Mutex::Autolock al(mMutex);
-                if (!mRunning) {
-                    ALOGV("%s: Shutting down thread", __FUNCTION__);
-                    break;
-                } else {
-                    ALOGE("%s: File was deleted, aborting",
-                          __FUNCTION__);
-                    mRunning = false;
-                    break;
-                }
-            } else if (event->mask & IN_CLOSE_WRITE) {
-                int cameraId = getCameraId(event->wd);
-
-                if (cameraId < 0) {
-                    ALOGE("%s: Got bad camera ID from WD '%d",
-                          __FUNCTION__, event->wd);
-                } else {
-                    // Check the file for the new hotplug event
-                    String8 filePath = getFilePath(cameraId);
-                    /**
-                     * NOTE: we carefully avoid getting an inotify
-                     * for the same exact file because it's opened for
-                     * read-only, but our inotify is for write-only
-                     */
-                    int newStatus = readFile(filePath);
-
-                    if (newStatus < 0) {
-                        mRunning = false;
-                        break;
-                    }
-
-                    int halStatus = newStatus ?
-                        CAMERA_DEVICE_STATUS_PRESENT :
-                        CAMERA_DEVICE_STATUS_NOT_PRESENT;
-                    EmulatedCameraFactory::Instance().onStatusChanged(cameraId,
-                                                                      halStatus);
-                }
-
-            } else {
-                ALOGW("%s: Unknown mask 0x%x",
-                      __FUNCTION__, event->mask);
-            }
-
-            i += EVENT_SIZE + event->len;
-        }
-    }
-
-    if (!mRunning) {
-        close(mInotifyFd);
-        return false;
-    }
-
-    return true;
-}
-
-String8 EmulatedCameraHotplugThread::getFilePath(int cameraId) const {
-    return String8::format(FAKE_HOTPLUG_FILE ".%d", cameraId);
-}
-
-bool EmulatedCameraHotplugThread::createFileIfNotExists(int cameraId) const
-{
-    String8 filePath = getFilePath(cameraId);
-    // make sure this file exists and we have access to it
-    int fd = TEMP_FAILURE_RETRY(
-                open(filePath.string(), O_WRONLY | O_CREAT | O_TRUNC,
-                     /* mode = ug+rwx */ S_IRWXU | S_IRWXG ));
-    if (fd == -1) {
-        ALOGE("%s: Could not create file '%s', error: '%s' (%d)",
-             __FUNCTION__, filePath.string(), strerror(errno), errno);
-        return false;
-    }
-
-    // File has '1' by default since we are plugged in by default
-    if (TEMP_FAILURE_RETRY(write(fd, "1\n", /*count*/2)) == -1) {
-        ALOGE("%s: Could not write '1' to file '%s', error: '%s' (%d)",
-             __FUNCTION__, filePath.string(), strerror(errno), errno);
-        return false;
-    }
-
-    close(fd);
-    return true;
-}
-
-int EmulatedCameraHotplugThread::getCameraId(String8 filePath) const {
+    /**
+     * For each fake camera file, add a watch for when
+     * the file is closed (if it was written to)
+     */
     Vector<int>::const_iterator it, end;
     it = mSubscribedCameraIds.begin();
     end = mSubscribedCameraIds.end();
     for (; it != end; ++it) {
-        String8 camPath = getFilePath(*it);
+      int cameraId = *it;
+      if (!addWatch(cameraId)) {
+        mRunning = false;
+        break;
+      }
+    }
+  } while (false);
 
-        if (camPath == filePath) {
-            return *it;
-        }
+  if (!mRunning) {
+    status_t err = -errno;
+
+    if (mInotifyFd != -1) {
+      close(mInotifyFd);
     }
 
-    return NAME_NOT_FOUND;
+    return err;
+  }
+
+  return OK;
+}
+
+bool EmulatedCameraHotplugThread::threadLoop() {
+  // If requestExit was already called, mRunning will be false
+  while (mRunning) {
+    char buffer[EVENT_BUF_LEN];
+    int length = TEMP_FAILURE_RETRY(read(mInotifyFd, buffer, EVENT_BUF_LEN));
+
+    if (length < 0) {
+      ALOGE("%s: Error reading from inotify FD, error: '%s' (%d)", __FUNCTION__,
+            strerror(errno), errno);
+      mRunning = false;
+      break;
+    }
+
+    ALOGV("%s: Read %d bytes from inotify FD", __FUNCTION__, length);
+
+    int i = 0;
+    while (i < length) {
+      inotify_event* event = (inotify_event*)&buffer[i];
+
+      if (event->mask & IN_IGNORED) {
+        Mutex::Autolock al(mMutex);
+        if (!mRunning) {
+          ALOGV("%s: Shutting down thread", __FUNCTION__);
+          break;
+        } else {
+          ALOGE("%s: File was deleted, aborting", __FUNCTION__);
+          mRunning = false;
+          break;
+        }
+      } else if (event->mask & IN_CLOSE_WRITE) {
+        int cameraId = getCameraId(event->wd);
+
+        if (cameraId < 0) {
+          ALOGE("%s: Got bad camera ID from WD '%d", __FUNCTION__, event->wd);
+        } else {
+          // Check the file for the new hotplug event
+          String8 filePath = getFilePath(cameraId);
+          /**
+           * NOTE: reading here cannot re-trigger our own watch:
+           * readFile opens the file read-only, and IN_CLOSE_WRITE
+           * fires only when a writable FD is closed
+           */
+          int newStatus = readFile(filePath);
+
+          if (newStatus < 0) {
+            mRunning = false;
+            break;
+          }
+
+          int halStatus = newStatus ? CAMERA_DEVICE_STATUS_PRESENT
+                                    : CAMERA_DEVICE_STATUS_NOT_PRESENT;
+          EmulatedCameraFactory::Instance().onStatusChanged(cameraId,
+                                                            halStatus);
+        }
+
+      } else {
+        ALOGW("%s: Unknown mask 0x%x", __FUNCTION__, event->mask);
+      }
+
+      i += EVENT_SIZE + event->len;
+    }
+  }
+
+  if (!mRunning) {
+    close(mInotifyFd);
+    return false;
+  }
+
+  return true;
+}
+
+String8 EmulatedCameraHotplugThread::getFilePath(int cameraId) const {
+  return String8::format(FAKE_HOTPLUG_FILE ".%d", cameraId);
+}
+
+bool EmulatedCameraHotplugThread::createFileIfNotExists(int cameraId) const {
+  String8 filePath = getFilePath(cameraId);
+  // make sure this file exists and we have access to it
+  int fd =
+      TEMP_FAILURE_RETRY(open(filePath.string(), O_WRONLY | O_CREAT | O_TRUNC,
+                              /* mode = ug+rwx */ S_IRWXU | S_IRWXG));
+  if (fd == -1) {
+    ALOGE("%s: Could not create file '%s', error: '%s' (%d)", __FUNCTION__,
+          filePath.string(), strerror(errno), errno);
+    return false;
+  }
+
+  // File has '1' by default since we are plugged in by default
+  if (TEMP_FAILURE_RETRY(write(fd, "1\n", /*count*/ 2)) == -1) {
+    ALOGE("%s: Could not write '1' to file '%s', error: '%s' (%d)",
+          __FUNCTION__, filePath.string(), strerror(errno), errno);
+    close(fd);
+    return false;
+  }
+
+  close(fd);
+  return true;
+}
+
+int EmulatedCameraHotplugThread::getCameraId(String8 filePath) const {
+  Vector<int>::const_iterator it, end;
+  it = mSubscribedCameraIds.begin();
+  end = mSubscribedCameraIds.end();
+  for (; it != end; ++it) {
+    String8 camPath = getFilePath(*it);
+
+    if (camPath == filePath) {
+      return *it;
+    }
+  }
+
+  return NAME_NOT_FOUND;
 }
 
 int EmulatedCameraHotplugThread::getCameraId(int wd) const {
-    for (size_t i = 0; i < mSubscribers.size(); ++i) {
-        if (mSubscribers[i].WatchID == wd) {
-            return mSubscribers[i].CameraID;
-        }
+  for (size_t i = 0; i < mSubscribers.size(); ++i) {
+    if (mSubscribers[i].WatchID == wd) {
+      return mSubscribers[i].CameraID;
     }
+  }
 
-    return NAME_NOT_FOUND;
+  return NAME_NOT_FOUND;
 }
 
-SubscriberInfo* EmulatedCameraHotplugThread::getSubscriberInfo(int cameraId)
-{
-    for (size_t i = 0; i < mSubscribers.size(); ++i) {
-        if (mSubscribers[i].CameraID == cameraId) {
-            return (SubscriberInfo*)&mSubscribers[i];
-        }
+SubscriberInfo* EmulatedCameraHotplugThread::getSubscriberInfo(int cameraId) {
+  for (size_t i = 0; i < mSubscribers.size(); ++i) {
+    if (mSubscribers[i].CameraID == cameraId) {
+      return (SubscriberInfo*)&mSubscribers[i];
     }
+  }
 
-    return NULL;
+  return NULL;
 }
 
 bool EmulatedCameraHotplugThread::addWatch(int cameraId) {
-    String8 camPath = getFilePath(cameraId);
-    int wd = inotify_add_watch(mInotifyFd,
-                               camPath.string(),
-                               IN_CLOSE_WRITE);
+  String8 camPath = getFilePath(cameraId);
+  int wd = inotify_add_watch(mInotifyFd, camPath.string(), IN_CLOSE_WRITE);
 
-    if (wd == -1) {
-        ALOGE("%s: Could not add watch for '%s', error: '%s' (%d)",
-             __FUNCTION__, camPath.string(), strerror(errno),
-             errno);
+  if (wd == -1) {
+    ALOGE("%s: Could not add watch for '%s', error: '%s' (%d)", __FUNCTION__,
+          camPath.string(), strerror(errno), errno);
 
-        mRunning = false;
-        return false;
-    }
+    mRunning = false;
+    return false;
+  }
 
-    ALOGV("%s: Watch added for camID='%d', wd='%d'",
-          __FUNCTION__, cameraId, wd);
+  ALOGV("%s: Watch added for camID='%d', wd='%d'", __FUNCTION__, cameraId, wd);
 
-    SubscriberInfo si = { cameraId, wd };
-    mSubscribers.push_back(si);
+  SubscriberInfo si = {cameraId, wd};
+  mSubscribers.push_back(si);
 
-    return true;
+  return true;
 }
 
 bool EmulatedCameraHotplugThread::removeWatch(int cameraId) {
-    SubscriberInfo* si = getSubscriberInfo(cameraId);
+  SubscriberInfo* si = getSubscriberInfo(cameraId);
 
-    if (!si) return false;
+  if (!si) return false;
 
-    if (inotify_rm_watch(mInotifyFd, si->WatchID) == -1) {
+  if (inotify_rm_watch(mInotifyFd, si->WatchID) == -1) {
+    ALOGE("%s: Could not remove watch for camID '%d', error: '%s' (%d)",
+          __FUNCTION__, cameraId, strerror(errno), errno);
 
-        ALOGE("%s: Could not remove watch for camID '%d', error: '%s' (%d)",
-             __FUNCTION__, cameraId, strerror(errno),
-             errno);
+    return false;
+  }
 
-        return false;
+  Vector<SubscriberInfo>::iterator it;
+  for (it = mSubscribers.begin(); it != mSubscribers.end(); ++it) {
+    if (it->CameraID == cameraId) {
+      break;
     }
+  }
 
-    Vector<SubscriberInfo>::iterator it;
-    for (it = mSubscribers.begin(); it != mSubscribers.end(); ++it) {
-        if (it->CameraID == cameraId) {
-            break;
-        }
-    }
+  if (it != mSubscribers.end()) {
+    mSubscribers.erase(it);
+  }
 
-    if (it != mSubscribers.end()) {
-        mSubscribers.erase(it);
-    }
-
-    return true;
+  return true;
 }
 
 int EmulatedCameraHotplugThread::readFile(String8 filePath) const {
+  int fd = TEMP_FAILURE_RETRY(open(filePath.string(), O_RDONLY, /*mode*/ 0));
+  if (fd == -1) {
+    ALOGE("%s: Could not open file '%s', error: '%s' (%d)", __FUNCTION__,
+          filePath.string(), strerror(errno), errno);
+    return -1;
+  }
 
-    int fd = TEMP_FAILURE_RETRY(
-                open(filePath.string(), O_RDONLY, /*mode*/0));
-    if (fd == -1) {
-        ALOGE("%s: Could not open file '%s', error: '%s' (%d)",
-             __FUNCTION__, filePath.string(), strerror(errno), errno);
-        return -1;
-    }
+  char buffer[1] = {0};  // zero-initialized so the ALOGV below is safe on EOF
+  int length;
 
-    char buffer[1];
-    int length;
+  length = TEMP_FAILURE_RETRY(read(fd, buffer, sizeof(buffer)));
+  if (length < 0) {
+    ALOGE("%s: Could not read file '%s', error: '%s' (%d)", __FUNCTION__,
+          filePath.string(), strerror(errno), errno);
+    close(fd);
+    return -1;
+  }
 
-    length = TEMP_FAILURE_RETRY(
-                    read(fd, buffer, sizeof(buffer)));
+  int retval;
 
-    int retval;
+  ALOGV("%s: Read file '%s', length='%d', buffer='%c'", __FUNCTION__,
+        filePath.string(), length, buffer[0]);
 
-    ALOGV("%s: Read file '%s', length='%d', buffer='%c'",
-         __FUNCTION__, filePath.string(), length, buffer[0]);
+  if (length == 0) {  // EOF
+    retval = 0;       // empty file is the same thing as 0
+  } else if (buffer[0] == '0') {
+    retval = 0;
+  } else {  // anything non-empty that's not beginning with '0'
+    retval = 1;
+  }
 
-    if (length == 0) { // EOF
-        retval = 0; // empty file is the same thing as 0
-    } else if (buffer[0] == '0') {
-        retval = 0;
-    } else { // anything non-empty that's not beginning with '0'
-        retval = 1;
-    }
+  close(fd);
 
-    close(fd);
-
-    return retval;
+  return retval;
 }
 
-} //namespace android
+}  // namespace android
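
The hotplug thread above is driven entirely by inotify: each fake hotplug file
gets an IN_CLOSE_WRITE watch, and when a writer closes the file the thread
re-reads it and reports the new status to the factory. The following is a
minimal standalone sketch of that pattern, assuming a hypothetical
/tmp/fake_hotplug.0 path in place of the FAKE_HOTPLUG_FILE-derived paths used
by the HAL:

// Sketch only: watch one file for close-after-write, as the hotplug
// thread does, then walk the packed inotify events.
#include <limits.h>
#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main() {
  const char* kPath = "/tmp/fake_hotplug.0";  // hypothetical test path

  int inotifyFd = inotify_init();
  if (inotifyFd == -1) {
    perror("inotify_init");
    return 1;
  }

  // Fires only when a writer closes the file (IN_CLOSE_WRITE), so our own
  // read-only opens of the same file cannot retrigger the watch.
  int wd = inotify_add_watch(inotifyFd, kPath, IN_CLOSE_WRITE);
  if (wd == -1) {
    perror("inotify_add_watch");
    return 1;
  }

  // Room for one event plus an optional name field, as with EVENT_BUF_LEN.
  char buffer[sizeof(struct inotify_event) + NAME_MAX + 1];
  ssize_t length = read(inotifyFd, buffer, sizeof(buffer));
  if (length < 0) {
    perror("read");
    return 1;
  }

  // Events are packed back to back; advance by the per-event length.
  for (ssize_t i = 0; i < length;) {
    struct inotify_event* event = (struct inotify_event*)&buffer[i];
    if (event->mask & IN_CLOSE_WRITE) {
      printf("wd %d: closed after write; re-read the file now\n", event->wd);
    }
    i += sizeof(struct inotify_event) + event->len;
  }

  close(inotifyFd);
  return 0;
}
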
diff --git a/guest/hals/camera/EmulatedCameraHotplugThread.h b/guest/hals/camera/EmulatedCameraHotplugThread.h
index 145322e..1cc47e1 100644
--- a/guest/hals/camera/EmulatedCameraHotplugThread.h
+++ b/guest/hals/camera/EmulatedCameraHotplugThread.h
@@ -25,53 +25,51 @@
  * Refer to FAKE_HOTPLUG_FILE in EmulatedCameraHotplugThread.cpp
  */
 
-#include "EmulatedCamera2.h"
 #include <utils/String8.h>
 #include <utils/Vector.h>
+#include "EmulatedCamera2.h"
 
 namespace android {
 class EmulatedCameraHotplugThread : public Thread {
-  public:
-    EmulatedCameraHotplugThread(size_t totalCameraCount);
-    ~EmulatedCameraHotplugThread();
+ public:
+  EmulatedCameraHotplugThread(size_t totalCameraCount);
+  ~EmulatedCameraHotplugThread();
 
-    virtual void requestExit();
-    virtual status_t requestExitAndWait();
+  virtual void requestExit();
+  virtual status_t requestExitAndWait();
 
-  private:
+ private:
+  virtual status_t readyToRun();
+  virtual bool threadLoop();
 
+  struct SubscriberInfo {
+    int CameraID;
+    int WatchID;
+  };
 
-    virtual status_t readyToRun();
-    virtual bool threadLoop();
+  bool addWatch(int cameraId);
+  bool removeWatch(int cameraId);
+  SubscriberInfo* getSubscriberInfo(int cameraId);
 
-    struct SubscriberInfo {
-        int CameraID;
-        int WatchID;
-    };
+  int getCameraId(String8 filePath) const;
+  int getCameraId(int wd) const;
 
-    bool addWatch(int cameraId);
-    bool removeWatch(int cameraId);
-    SubscriberInfo* getSubscriberInfo(int cameraId);
+  String8 getFilePath(int cameraId) const;
+  int readFile(String8 filePath) const;
 
-    int getCameraId(String8 filePath) const;
-    int getCameraId(int wd) const;
+  bool createFileIfNotExists(int cameraId) const;
 
-    String8 getFilePath(int cameraId) const;
-    int readFile(String8 filePath) const;
+  int mInotifyFd;
+  Vector<int> mSubscribedCameraIds;
+  Vector<SubscriberInfo> mSubscribers;
 
-    bool createFileIfNotExists(int cameraId) const;
+  // variables above are unguarded:
+  // -- accessed in thread loop or in constructor only
 
-    int mInotifyFd;
-    Vector<int> mSubscribedCameraIds;
-    Vector<SubscriberInfo> mSubscribers;
+  Mutex mMutex;
 
-    // variables above are unguarded:
-    // -- accessed in thread loop or in constructor only
-
-    Mutex mMutex;
-
-    bool mRunning;          // guarding only when it's important
+  bool mRunning;  // guarded only where it matters
 };
-} // namespace android
+}  // namespace android
 
 #endif
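
The header makes the fake-hotplug protocol explicit: the state file holds a
single digit, '1' (or anything non-'0', including an empty file) for present
and '0' for not present, and createFileIfNotExists() seeds it with "1\n". A
hedged sketch of the test-side toggle, again with a hypothetical path standing
in for the real FAKE_HOTPLUG_FILE location:

// Sketch only: flip the fake hotplug state; closing the descriptor is what
// delivers IN_CLOSE_WRITE to the watcher.
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static bool setHotplugState(const char* path, bool present) {
  int fd = open(path, O_WRONLY | O_TRUNC);  // file was created by the HAL
  if (fd == -1) {
    perror("open");
    return false;
  }
  const char* value = present ? "1\n" : "0\n";
  bool ok = (write(fd, value, 2) == 2);
  close(fd);  // the watcher fires here
  return ok;
}

int main() {
  const char* kPath = "/tmp/fake_hotplug.0";  // hypothetical test path
  setHotplugState(kPath, false);  // simulate unplug
  sleep(1);
  setHotplugState(kPath, true);   // plug back in
  return 0;
}
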
diff --git a/guest/hals/camera/EmulatedFakeCamera.cpp b/guest/hals/camera/EmulatedFakeCamera.cpp
index 55dbc6c..376628e 100644
--- a/guest/hals/camera/EmulatedFakeCamera.cpp
+++ b/guest/hals/camera/EmulatedFakeCamera.cpp
@@ -21,91 +21,82 @@
 
 #define LOG_NDEBUG 0
 #define LOG_TAG "EmulatedCamera_FakeCamera"
-#include <sstream>
-#include <string>
+#include "EmulatedFakeCamera.h"
 #include <cutils/log.h>
 #include <cutils/properties.h>
-#include "EmulatedFakeCamera.h"
+#include <sstream>
+#include <string>
 #include "EmulatedCameraFactory.h"
 
 namespace android {
 
-EmulatedFakeCamera::EmulatedFakeCamera(int cameraId,
-                                       bool facingBack,
+EmulatedFakeCamera::EmulatedFakeCamera(int cameraId, bool facingBack,
                                        struct hw_module_t* module)
-        : EmulatedCamera(cameraId, module),
-          mFacingBack(facingBack),
-          mFakeCameraDevice(this)
-{
-}
+    : EmulatedCamera(cameraId, module),
+      mFacingBack(facingBack),
+      mFakeCameraDevice(this) {}
 
-EmulatedFakeCamera::~EmulatedFakeCamera()
-{
-}
+EmulatedFakeCamera::~EmulatedFakeCamera() {}
 
 /****************************************************************************
  * Public API overrides
  ***************************************************************************/
 
-status_t EmulatedFakeCamera::Initialize(const cvd::CameraDefinition& params)
-{
-    status_t res = mFakeCameraDevice.Initialize();
-    if (res != NO_ERROR) {
-        return res;
+status_t EmulatedFakeCamera::Initialize(const cvd::CameraDefinition& params) {
+  status_t res = mFakeCameraDevice.Initialize();
+  if (res != NO_ERROR) {
+    return res;
+  }
+
+  const char* facing =
+      mFacingBack ? EmulatedCamera::FACING_BACK : EmulatedCamera::FACING_FRONT;
+
+  mParameters.set(EmulatedCamera::FACING_KEY, facing);
+  ALOGD("%s: Fake camera is facing %s", __FUNCTION__, facing);
+
+  mParameters.set(EmulatedCamera::ORIENTATION_KEY,
+                  EmulatedCameraFactory::Instance().getFakeCameraOrientation());
+
+  res = EmulatedCamera::Initialize(params);
+  if (res != NO_ERROR) {
+    return res;
+  }
+
+  /*
+   * Parameters provided by the camera device.
+   */
+
+  /* 352x288 and 320x240 frame dimensions are required by the framework for
+   * video mode preview and video recording. */
+  std::ostringstream resolutions;
+  for (size_t index = 0; index < params.resolutions.size(); ++index) {
+    if (resolutions.str().size()) {
+      resolutions << ",";
     }
+    resolutions << params.resolutions[index].width << "x"
+                << params.resolutions[index].height;
+  }
 
-    const char* facing = mFacingBack ? EmulatedCamera::FACING_BACK :
-                                       EmulatedCamera::FACING_FRONT;
+  mParameters.set(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES,
+                  resolutions.str().c_str());
+  mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES,
+                  resolutions.str().c_str());
+  mParameters.setPreviewSize(640, 480);
+  mParameters.setPictureSize(640, 480);
 
-    mParameters.set(EmulatedCamera::FACING_KEY, facing);
-    ALOGD("%s: Fake camera is facing %s", __FUNCTION__, facing);
+  mParameters.set(CameraParameters::KEY_SUPPORTED_ANTIBANDING,
+                  CameraParameters::ANTIBANDING_AUTO);
+  mParameters.set(CameraParameters::KEY_ANTIBANDING,
+                  CameraParameters::ANTIBANDING_AUTO);
+  mParameters.set(CameraParameters::KEY_SUPPORTED_EFFECTS,
+                  CameraParameters::EFFECT_NONE);
+  mParameters.set(CameraParameters::KEY_EFFECT, CameraParameters::EFFECT_NONE);
 
-    mParameters.set(EmulatedCamera::ORIENTATION_KEY,
-                    EmulatedCameraFactory::Instance().getFakeCameraOrientation());
-
-    res = EmulatedCamera::Initialize(params);
-    if (res != NO_ERROR) {
-        return res;
-    }
-
-    /*
-     * Parameters provided by the camera device.
-     */
-
-    /* 352x288 and 320x240 frame dimensions are required by the framework for
-     * video mode preview and video recording. */
-    std::ostringstream resolutions;
-    for (size_t index = 0; index < params.resolutions.size(); ++index) {
-      if (resolutions.str().size()) {
-        resolutions << ",";
-      }
-      resolutions << params.resolutions[index].width << "x"
-                  << params.resolutions[index].height;
-    }
-
-    mParameters.set(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES,
-                    resolutions.str().c_str());
-    mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES,
-                    resolutions.str().c_str());
-    mParameters.setPreviewSize(640, 480);
-    mParameters.setPictureSize(640, 480);
-
-    mParameters.set(CameraParameters::KEY_SUPPORTED_ANTIBANDING,
-                    CameraParameters::ANTIBANDING_AUTO);
-    mParameters.set(CameraParameters::KEY_ANTIBANDING,
-                    CameraParameters::ANTIBANDING_AUTO);
-    mParameters.set(CameraParameters::KEY_SUPPORTED_EFFECTS,
-                    CameraParameters::EFFECT_NONE);
-    mParameters.set(CameraParameters::KEY_EFFECT,
-                    CameraParameters::EFFECT_NONE);
-
-
-    return NO_ERROR;
+  return NO_ERROR;
 }
 
-EmulatedCameraDevice* EmulatedFakeCamera::getCameraDevice()
-{
-    return &mFakeCameraDevice;
+EmulatedCameraDevice* EmulatedFakeCamera::getCameraDevice() {
+  return &mFakeCameraDevice;
 }
 
-};  /* namespace android */
+}; /* namespace android */
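
Initialize() above flattens the supported resolutions into the comma-separated
"WxH,WxH" form that CameraParameters expects for KEY_SUPPORTED_PICTURE_SIZES
and KEY_SUPPORTED_PREVIEW_SIZES. A minimal sketch of that formatting step,
with a stand-in struct instead of cvd::CameraDefinition:

// Sketch only: build "640x480,352x288,320x240" from a resolution list.
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

struct Resolution {  // stand-in for cvd::CameraDefinition entries
  int width;
  int height;
};

static std::string formatResolutions(const std::vector<Resolution>& res) {
  std::ostringstream out;
  for (size_t i = 0; i < res.size(); ++i) {
    if (out.str().size()) {
      out << ",";  // separator between entries, none before the first
    }
    out << res[i].width << "x" << res[i].height;
  }
  return out.str();
}

int main() {
  std::vector<Resolution> res = {{640, 480}, {352, 288}, {320, 240}};
  std::cout << formatResolutions(res) << std::endl;
  return 0;
}
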
diff --git a/guest/hals/camera/EmulatedFakeCamera.h b/guest/hals/camera/EmulatedFakeCamera.h
index 8657df9..5afba3f 100644
--- a/guest/hals/camera/EmulatedFakeCamera.h
+++ b/guest/hals/camera/EmulatedFakeCamera.h
@@ -33,42 +33,42 @@
  * instance that emulates a fake camera device.
  */
 class EmulatedFakeCamera : public EmulatedCamera {
-public:
-    /* Constructs EmulatedFakeCamera instance. */
-    EmulatedFakeCamera(int cameraId, bool facingBack, struct hw_module_t* module);
+ public:
+  /* Constructs EmulatedFakeCamera instance. */
+  EmulatedFakeCamera(int cameraId, bool facingBack, struct hw_module_t* module);
 
-    /* Destructs EmulatedFakeCamera instance. */
-    ~EmulatedFakeCamera();
+  /* Destructs EmulatedFakeCamera instance. */
+  ~EmulatedFakeCamera();
 
-    /****************************************************************************
-     * EmulatedCamera virtual overrides.
-     ***************************************************************************/
+  /****************************************************************************
+   * EmulatedCamera virtual overrides.
+   ***************************************************************************/
 
-public:
-    /* Initializes EmulatedFakeCamera instance. */
-     status_t Initialize(const cvd::CameraDefinition& params);
+ public:
+  /* Initializes EmulatedFakeCamera instance. */
+  status_t Initialize(const cvd::CameraDefinition& params);
 
-    /****************************************************************************
-     * EmulatedCamera abstract API implementation.
-     ***************************************************************************/
+  /****************************************************************************
+   * EmulatedCamera abstract API implementation.
+   ***************************************************************************/
 
-protected:
-    /* Gets emulated camera device ised by this instance of the emulated camera.
-     */
-    EmulatedCameraDevice* getCameraDevice();
+ protected:
+  /* Gets the emulated camera device used by this instance of the emulated
+   * camera.
+   */
+  EmulatedCameraDevice* getCameraDevice();
 
-    /****************************************************************************
-     * Data memebers.
-     ***************************************************************************/
+  /****************************************************************************
+   * Data members.
+   ***************************************************************************/
 
-protected:
-    /* Facing back (true) or front (false) switch. */
-    bool                        mFacingBack;
+ protected:
+  /* Facing back (true) or front (false) switch. */
+  bool mFacingBack;
 
-    /* Contained fake camera device object. */
-    EmulatedFakeCameraDevice    mFakeCameraDevice;
+  /* Contained fake camera device object. */
+  EmulatedFakeCameraDevice mFakeCameraDevice;
 };
 
 }; /* namespace android */
 
-#endif  /* HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA_H */
+#endif /* HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA_H */
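
EmulatedFakeCamera2::Initialize() in the next file builds its static camera
metadata in two passes: constructStaticInfo() is called once with a "size
request" flag to allocate the buffer, then again to fill in the entries. A
generic sketch of that allocate-then-populate idiom, with hypothetical names
(the real signature belongs to constructStaticInfo):

// Sketch only: pass 1 sizes and allocates, pass 2 writes into the buffer.
#include <stdlib.h>
#include <string.h>

typedef int status_t;  // stand-ins for the HAL's status codes
static const status_t OK = 0;
static const status_t NO_MEMORY = -12;

static status_t buildInfo(char** info, bool sizeRequest) {
  static const char kPayload[] = "static-info";
  if (sizeRequest) {
    *info = (char*)malloc(sizeof(kPayload));  // pass 1: allocate only
    return (*info != NULL) ? OK : NO_MEMORY;
  }
  memcpy(*info, kPayload, sizeof(kPayload));  // pass 2: populate
  return OK;
}

int main() {
  char* info = NULL;
  if (buildInfo(&info, true) != OK) return 1;   // allocate
  if (buildInfo(&info, false) != OK) return 1;  // fill in
  free(info);
  return 0;
}
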
diff --git a/guest/hals/camera/EmulatedFakeCamera2.cpp b/guest/hals/camera/EmulatedFakeCamera2.cpp
index e5f2387..9a2f082 100644
--- a/guest/hals/camera/EmulatedFakeCamera2.cpp
+++ b/guest/hals/camera/EmulatedFakeCamera2.cpp
@@ -27,11 +27,11 @@
 #define LOG_TAG "EmulatedCamera_FakeCamera2"
 #include <utils/Log.h>
 
+#include "EmulatedCameraFactory.h"
+#include "EmulatedFakeCamera2.h"
+#include "GrallocModule.h"
 #include "common/libs/auto_resources/auto_resources.h"
 #include "guest/libs/platform_support/api_level_fixes.h"
-#include "EmulatedFakeCamera2.h"
-#include "EmulatedCameraFactory.h"
-#include "GrallocModule.h"
 
 #define ERROR_CAMERA_NOT_PRESENT -EPIPE
 
@@ -45,13 +45,11 @@
 
 const uint32_t EmulatedFakeCamera2::kAvailableFormats[] = {
 #if VSOC_PLATFORM_SDK_AFTER(K)
-        HAL_PIXEL_FORMAT_RAW16,
+    HAL_PIXEL_FORMAT_RAW16,
 #endif
-        HAL_PIXEL_FORMAT_BLOB,
-        HAL_PIXEL_FORMAT_RGBA_8888,
-        //        HAL_PIXEL_FORMAT_YV12,
-        HAL_PIXEL_FORMAT_YCrCb_420_SP
-};
+    HAL_PIXEL_FORMAT_BLOB, HAL_PIXEL_FORMAT_RGBA_8888,
+    //        HAL_PIXEL_FORMAT_YV12,
+    HAL_PIXEL_FORMAT_YCrCb_420_SP};
 
 const uint32_t EmulatedFakeCamera2::kAvailableRawSizes[2] = {
     640, 480
@@ -59,8 +57,7 @@
 };
 
 const uint64_t EmulatedFakeCamera2::kAvailableRawMinDurations[1] = {
-    static_cast<uint64_t>(Sensor::kFrameDurationRange[0])
-};
+    static_cast<uint64_t>(Sensor::kFrameDurationRange[0])};
 
 const uint32_t EmulatedFakeCamera2::kAvailableProcessedSizesBack[4] = {
     640, 480, 320, 240
@@ -73,8 +70,7 @@
 };
 
 const uint64_t EmulatedFakeCamera2::kAvailableProcessedMinDurations[1] = {
-    static_cast<uint64_t>(Sensor::kFrameDurationRange[0])
-};
+    static_cast<uint64_t>(Sensor::kFrameDurationRange[0])};
 
 const uint32_t EmulatedFakeCamera2::kAvailableJpegSizesBack[2] = {
     640, 480
@@ -86,233 +82,225 @@
     //    mSensorWidth, mSensorHeight
 };
 
-
 const uint64_t EmulatedFakeCamera2::kAvailableJpegMinDurations[1] = {
-    static_cast<uint64_t>(Sensor::kFrameDurationRange[0])
-};
+    static_cast<uint64_t>(Sensor::kFrameDurationRange[0])};
 
-
-EmulatedFakeCamera2::EmulatedFakeCamera2(int cameraId,
-        bool facingBack,
-        struct hw_module_t* module)
-        : EmulatedCamera2(cameraId,module),
-          mFacingBack(facingBack),
-          mIsConnected(false)
-{
-    ALOGD("Constructing emulated fake camera 2 facing %s",
-            facingBack ? "back" : "front");
+EmulatedFakeCamera2::EmulatedFakeCamera2(int cameraId, bool facingBack,
+                                         struct hw_module_t *module)
+    : EmulatedCamera2(cameraId, module),
+      mFacingBack(facingBack),
+      mIsConnected(false) {
+  ALOGD("Constructing emulated fake camera 2 facing %s",
+        facingBack ? "back" : "front");
 }
 
 EmulatedFakeCamera2::~EmulatedFakeCamera2() {
-    if (mCameraInfo != NULL) {
-        free_camera_metadata(mCameraInfo);
-    }
+  if (mCameraInfo != NULL) {
+    free_camera_metadata(mCameraInfo);
+  }
 }
 
 /****************************************************************************
  * Public API overrides
  ***************************************************************************/
 
-status_t EmulatedFakeCamera2::Initialize(const cvd::CameraDefinition& params) {
-    status_t res;
+status_t EmulatedFakeCamera2::Initialize(const cvd::CameraDefinition &params) {
+  status_t res;
 
-    for (size_t index = 0; index < params.resolutions.size(); ++index) {
-        mAvailableRawSizes.push_back(params.resolutions[index].width);
-        mAvailableRawSizes.push_back(params.resolutions[index].height);
-        mAvailableProcessedSizes.push_back(params.resolutions[index].width);
-        mAvailableProcessedSizes.push_back(params.resolutions[index].height);
-        mAvailableJpegSizes.push_back(params.resolutions[index].width);
-        mAvailableJpegSizes.push_back(params.resolutions[index].height);
+  for (size_t index = 0; index < params.resolutions.size(); ++index) {
+    mAvailableRawSizes.push_back(params.resolutions[index].width);
+    mAvailableRawSizes.push_back(params.resolutions[index].height);
+    mAvailableProcessedSizes.push_back(params.resolutions[index].width);
+    mAvailableProcessedSizes.push_back(params.resolutions[index].height);
+    mAvailableJpegSizes.push_back(params.resolutions[index].width);
+    mAvailableJpegSizes.push_back(params.resolutions[index].height);
+  }
+
+  // Find max width/height
+  int32_t width = 0, height = 0;
+  for (size_t index = 0; index < params.resolutions.size(); ++index) {
+    if (width <= params.resolutions[index].width &&
+        height <= params.resolutions[index].height) {
+      width = params.resolutions[index].width;
+      height = params.resolutions[index].height;
     }
+  }
+  if (width < 640 || height < 480) {
+    width = 640;
+    height = 480;
+  }
+  mSensorWidth = width;
+  mSensorHeight = height;
 
-    // Find max width/height
-    int32_t width = 0, height = 0;
-    for (size_t index = 0; index < params.resolutions.size(); ++index) {
-        if (width <= params.resolutions[index].width &&
-            height <= params.resolutions[index].height) {
-            width = params.resolutions[index].width;
-            height = params.resolutions[index].height;
-        }
-    }
-    if (width < 640 || height < 480) {
-        width = 640;
-        height = 480;
-    }
-    mSensorWidth = width;
-    mSensorHeight = height;
+  /* TODO(ender): probably should drop this. */
+  std::copy(kAvailableRawSizes,
+            kAvailableRawSizes + arraysize(kAvailableRawSizes),
+            std::back_inserter(mAvailableRawSizes));
 
-    /* TODO(ender): probably should drop this. */
-    std::copy(kAvailableRawSizes,
-              kAvailableRawSizes + arraysize(kAvailableRawSizes),
-              std::back_inserter(mAvailableRawSizes));
+  if (params.orientation == cvd::CameraDefinition::kFront) {
+    std::copy(kAvailableProcessedSizesFront,
+              kAvailableProcessedSizesFront +
+                  arraysize(kAvailableProcessedSizesFront),
+              std::back_inserter(mAvailableProcessedSizes));
+    std::copy(kAvailableJpegSizesFront,
+              kAvailableJpegSizesFront + arraysize(kAvailableJpegSizesFront),
+              std::back_inserter(mAvailableJpegSizes));
+  } else {
+    std::copy(
+        kAvailableProcessedSizesBack,
+        kAvailableProcessedSizesBack + arraysize(kAvailableProcessedSizesBack),
+        mAvailableProcessedSizes.begin());
+    std::copy(kAvailableJpegSizesBack,
+              kAvailableJpegSizesBack + arraysize(kAvailableJpegSizesBack),
+              mAvailableJpegSizes.begin());
+  }
 
-    if (params.orientation == cvd::CameraDefinition::kFront) {
-      std::copy(kAvailableProcessedSizesFront,
-                kAvailableProcessedSizesFront +
-                arraysize(kAvailableProcessedSizesFront),
-                std::back_inserter(mAvailableProcessedSizes));
-      std::copy(kAvailableJpegSizesFront,
-                kAvailableJpegSizesFront + arraysize(kAvailableJpegSizesFront),
-                std::back_inserter(mAvailableJpegSizes));
-    } else {
-      std::copy(kAvailableProcessedSizesBack,
-                kAvailableProcessedSizesBack +
-                arraysize(kAvailableProcessedSizesBack),
-                mAvailableProcessedSizes.begin());
-      std::copy(kAvailableJpegSizesBack,
-                kAvailableJpegSizesBack + arraysize(kAvailableJpegSizesBack),
-                mAvailableJpegSizes.begin());
-    }
+  res = constructStaticInfo(&mCameraInfo, true);
+  if (res != OK) {
+    ALOGE("%s: Unable to allocate static info: %s (%d)", __FUNCTION__,
+          strerror(-res), res);
+    return res;
+  }
+  res = constructStaticInfo(&mCameraInfo, false);
+  if (res != OK) {
+    ALOGE("%s: Unable to fill in static info: %s (%d)", __FUNCTION__,
+          strerror(-res), res);
+    return res;
+  }
+  if (res != OK) return res;
 
-    res = constructStaticInfo(&mCameraInfo, true);
-    if (res != OK) {
-        ALOGE("%s: Unable to allocate static info: %s (%d)",
-                __FUNCTION__, strerror(-res), res);
-        return res;
-    }
-    res = constructStaticInfo(&mCameraInfo, false);
-    if (res != OK) {
-        ALOGE("%s: Unable to fill in static info: %s (%d)",
-                __FUNCTION__, strerror(-res), res);
-        return res;
-    }
-    if (res != OK) return res;
+  mNextStreamId = 1;
+  mNextReprocessStreamId = 1;
+  mRawStreamCount = 0;
+  mProcessedStreamCount = 0;
+  mJpegStreamCount = 0;
+  mReprocessStreamCount = 0;
 
-    mNextStreamId = 1;
-    mNextReprocessStreamId = 1;
-    mRawStreamCount = 0;
-    mProcessedStreamCount = 0;
-    mJpegStreamCount = 0;
-    mReprocessStreamCount = 0;
-
-    return NO_ERROR;
+  return NO_ERROR;
 }
 
 /****************************************************************************
  * Camera module API overrides
  ***************************************************************************/
 
-status_t EmulatedFakeCamera2::connectCamera(hw_device_t** device) {
-    status_t res;
-    ALOGV("%s", __FUNCTION__);
+status_t EmulatedFakeCamera2::connectCamera(hw_device_t **device) {
+  status_t res;
+  ALOGV("%s", __FUNCTION__);
 
-    {
-        Mutex::Autolock l(mMutex);
-        if (!mStatusPresent) {
-            ALOGE("%s: Camera ID %d is unplugged", __FUNCTION__,
-                  mCameraID);
-            return -ENODEV;
-        }
+  {
+    Mutex::Autolock l(mMutex);
+    if (!mStatusPresent) {
+      ALOGE("%s: Camera ID %d is unplugged", __FUNCTION__, mCameraID);
+      return -ENODEV;
     }
+  }
 
-    mConfigureThread = new ConfigureThread(this);
-    mReadoutThread = new ReadoutThread(this);
-    mControlThread = new ControlThread(this);
-    mSensor = new Sensor(mSensorWidth, mSensorHeight);
-    mJpegCompressor = new JpegCompressor();
+  mConfigureThread = new ConfigureThread(this);
+  mReadoutThread = new ReadoutThread(this);
+  mControlThread = new ControlThread(this);
+  mSensor = new Sensor(mSensorWidth, mSensorHeight);
+  mJpegCompressor = new JpegCompressor();
 
-    mNextStreamId = 1;
-    mNextReprocessStreamId = 1;
+  mNextStreamId = 1;
+  mNextReprocessStreamId = 1;
 
-    res = mSensor->startUp();
-    if (res != NO_ERROR) return res;
+  res = mSensor->startUp();
+  if (res != NO_ERROR) return res;
 
-    res = mConfigureThread->run("EmulatedFakeCamera2::configureThread");
-    if (res != NO_ERROR) return res;
+  res = mConfigureThread->run("EmulatedFakeCamera2::configureThread");
+  if (res != NO_ERROR) return res;
 
-    res = mReadoutThread->run("EmulatedFakeCamera2::readoutThread");
-    if (res != NO_ERROR) return res;
+  res = mReadoutThread->run("EmulatedFakeCamera2::readoutThread");
+  if (res != NO_ERROR) return res;
 
-    res = mControlThread->run("EmulatedFakeCamera2::controlThread");
-    if (res != NO_ERROR) return res;
+  res = mControlThread->run("EmulatedFakeCamera2::controlThread");
+  if (res != NO_ERROR) return res;
 
-    status_t ret = EmulatedCamera2::connectCamera(device);
+  status_t ret = EmulatedCamera2::connectCamera(device);
 
-    if (ret >= 0) {
-        mIsConnected = true;
-    }
+  if (ret >= 0) {
+    mIsConnected = true;
+  }
 
-    return ret;
+  return ret;
 }
 
 status_t EmulatedFakeCamera2::plugCamera() {
-    {
-        Mutex::Autolock l(mMutex);
+  {
+    Mutex::Autolock l(mMutex);
 
-        if (!mStatusPresent) {
-            ALOGI("%s: Plugged back in", __FUNCTION__);
-            mStatusPresent = true;
-        }
+    if (!mStatusPresent) {
+      ALOGI("%s: Plugged back in", __FUNCTION__);
+      mStatusPresent = true;
     }
+  }
 
-    return NO_ERROR;
+  return NO_ERROR;
 }
 
 status_t EmulatedFakeCamera2::unplugCamera() {
-    {
-        Mutex::Autolock l(mMutex);
+  {
+    Mutex::Autolock l(mMutex);
 
-        if (mStatusPresent) {
-            ALOGI("%s: Unplugged camera", __FUNCTION__);
-            mStatusPresent = false;
-        }
+    if (mStatusPresent) {
+      ALOGI("%s: Unplugged camera", __FUNCTION__);
+      mStatusPresent = false;
     }
+  }
 
-    return closeCamera();
+  return closeCamera();
 }
 
 camera_device_status_t EmulatedFakeCamera2::getHotplugStatus() {
-    Mutex::Autolock l(mMutex);
-    return mStatusPresent ?
-        CAMERA_DEVICE_STATUS_PRESENT :
-        CAMERA_DEVICE_STATUS_NOT_PRESENT;
+  Mutex::Autolock l(mMutex);
+  return mStatusPresent ? CAMERA_DEVICE_STATUS_PRESENT
+                        : CAMERA_DEVICE_STATUS_NOT_PRESENT;
 }
 
-
-
 status_t EmulatedFakeCamera2::closeCamera() {
-    {
-        Mutex::Autolock l(mMutex);
+  {
+    Mutex::Autolock l(mMutex);
 
-        status_t res;
-        ALOGV("%s", __FUNCTION__);
+    status_t res;
+    ALOGV("%s", __FUNCTION__);
 
-        if (!mIsConnected) {
-            return NO_ERROR;
-        }
-
-        res = mSensor->shutDown();
-        if (res != NO_ERROR) {
-            ALOGE("%s: Unable to shut down sensor: %d", __FUNCTION__, res);
-            return res;
-        }
-
-        mConfigureThread->requestExit();
-        mReadoutThread->requestExit();
-        mControlThread->requestExit();
-        mJpegCompressor->cancel();
+    if (!mIsConnected) {
+      return NO_ERROR;
     }
 
-    // give up the lock since we will now block and the threads
-    // can call back into this object
-    mConfigureThread->join();
-    mReadoutThread->join();
-    mControlThread->join();
-
-    ALOGV("%s exit", __FUNCTION__);
-
-    {
-        Mutex::Autolock l(mMutex);
-        mIsConnected = false;
+    res = mSensor->shutDown();
+    if (res != NO_ERROR) {
+      ALOGE("%s: Unable to shut down sensor: %d", __FUNCTION__, res);
+      return res;
     }
 
-    return NO_ERROR;
+    mConfigureThread->requestExit();
+    mReadoutThread->requestExit();
+    mControlThread->requestExit();
+    mJpegCompressor->cancel();
+  }
+
+  // give up the lock since we will now block and the threads
+  // can call back into this object
+  mConfigureThread->join();
+  mReadoutThread->join();
+  mControlThread->join();
+
+  ALOGV("%s exit", __FUNCTION__);
+
+  {
+    Mutex::Autolock l(mMutex);
+    mIsConnected = false;
+  }
+
+  return NO_ERROR;
 }
 
 status_t EmulatedFakeCamera2::getCameraInfo(struct camera_info *info) {
-    info->facing = mFacingBack ? CAMERA_FACING_BACK : CAMERA_FACING_FRONT;
-    info->orientation = EmulatedCameraFactory::Instance().getFakeCameraOrientation();
-    return EmulatedCamera2::getCameraInfo(info);
+  info->facing = mFacingBack ? CAMERA_FACING_BACK : CAMERA_FACING_FRONT;
+  info->orientation =
+      EmulatedCameraFactory::Instance().getFakeCameraOrientation();
+  return EmulatedCamera2::getCameraInfo(info);
 }
 
 /****************************************************************************
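
closeCamera() above is careful about lock ordering: it requests exit from the
worker threads while holding mMutex, but releases the lock before joining
them, since those threads can call back into this object and would otherwise
deadlock. A minimal sketch of that pattern, using std::thread and std::mutex
as stand-ins for the HAL's Thread and Mutex types:

// Sketch only: signal exit under the lock, join with the lock released.
#include <mutex>
#include <thread>

std::mutex gMutex;
bool gExitRequested = false;

static void worker() {  // like the configure/readout/control threads
  for (;;) {
    std::lock_guard<std::mutex> lock(gMutex);
    if (gExitRequested) return;
  }
}

int main() {
  std::thread t(worker);
  {
    std::lock_guard<std::mutex> lock(gMutex);
    gExitRequested = true;  // request exit under the lock...
  }
  t.join();  // ...but join without it, or the worker could never
             // acquire gMutex to observe the flag, deadlocking us.
  return 0;
}
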
@@ -322,2444 +310,2315 @@
 /** Request input queue */
 
 int EmulatedFakeCamera2::requestQueueNotify() {
-    ALOGV("Request queue notification received");
+  ALOGV("Request queue notification received");
 
-    ALOG_ASSERT(mRequestQueueSrc != NULL,
-            "%s: Request queue src not set, but received queue notification!",
-            __FUNCTION__);
-    ALOG_ASSERT(mFrameQueueDst != NULL,
-            "%s: Request queue src not set, but received queue notification!",
-            __FUNCTION__);
-    ALOG_ASSERT(mStreams.size() != 0,
-            "%s: No streams allocated, but received queue notification!",
-            __FUNCTION__);
-    return mConfigureThread->newRequestAvailable();
+  ALOG_ASSERT(mRequestQueueSrc != NULL,
+              "%s: Request queue src not set, but received queue notification!",
+              __FUNCTION__);
+  ALOG_ASSERT(mFrameQueueDst != NULL,
+              "%s: Request queue src not set, but received queue notification!",
+              __FUNCTION__);
+  ALOG_ASSERT(mStreams.size() != 0,
+              "%s: No streams allocated, but received queue notification!",
+              __FUNCTION__);
+  return mConfigureThread->newRequestAvailable();
 }
 
 int EmulatedFakeCamera2::getInProgressCount() {
-    Mutex::Autolock l(mMutex);
+  Mutex::Autolock l(mMutex);
 
-    if (!mStatusPresent) {
-        ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
-        return ERROR_CAMERA_NOT_PRESENT;
-    }
+  if (!mStatusPresent) {
+    ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
+    return ERROR_CAMERA_NOT_PRESENT;
+  }
 
-    int requestCount = 0;
-    requestCount += mConfigureThread->getInProgressCount();
-    requestCount += mReadoutThread->getInProgressCount();
-    requestCount += mJpegCompressor->isBusy() ? 1 : 0;
+  int requestCount = 0;
+  requestCount += mConfigureThread->getInProgressCount();
+  requestCount += mReadoutThread->getInProgressCount();
+  requestCount += mJpegCompressor->isBusy() ? 1 : 0;
 
-    return requestCount;
+  return requestCount;
 }
 
-int EmulatedFakeCamera2::constructDefaultRequest(
-        int request_template,
-        camera_metadata_t **request) {
+int EmulatedFakeCamera2::constructDefaultRequest(int request_template,
+                                                 camera_metadata_t **request) {
+  if (request == NULL) return BAD_VALUE;
+  if (request_template < 0 || request_template >= CAMERA2_TEMPLATE_COUNT) {
+    return BAD_VALUE;
+  }
 
-    if (request == NULL) return BAD_VALUE;
-    if (request_template < 0 || request_template >= CAMERA2_TEMPLATE_COUNT) {
-        return BAD_VALUE;
+  {
+    Mutex::Autolock l(mMutex);
+    if (!mStatusPresent) {
+      ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
+      return ERROR_CAMERA_NOT_PRESENT;
     }
+  }
 
-    {
-        Mutex::Autolock l(mMutex);
-        if (!mStatusPresent) {
-            ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
-            return ERROR_CAMERA_NOT_PRESENT;
-        }
-    }
-
-    status_t res;
-    // Pass 1, calculate size and allocate
-    res = constructDefaultRequest(request_template,
-            request,
-            true);
-    if (res != OK) {
-        return res;
-    }
-    // Pass 2, build request
-    res = constructDefaultRequest(request_template,
-            request,
-            false);
-    if (res != OK) {
-        ALOGE("Unable to populate new request for template %d",
-                request_template);
-    }
-
+  status_t res;
+  // Pass 1, calculate size and allocate
+  res = constructDefaultRequest(request_template, request, true);
+  if (res != OK) {
     return res;
+  }
+  // Pass 2, build request
+  res = constructDefaultRequest(request_template, request, false);
+  if (res != OK) {
+    ALOGE("Unable to populate new request for template %d", request_template);
+  }
+
+  return res;
 }
 
 int EmulatedFakeCamera2::allocateStream(
-        uint32_t width,
-        uint32_t height,
-        int format,
-        const camera2_stream_ops_t *stream_ops,
-        uint32_t *stream_id,
-        uint32_t *format_actual,
-        uint32_t *usage,
-        uint32_t *max_buffers) {
-    Mutex::Autolock l(mMutex);
+    uint32_t width, uint32_t height, int format,
+    const camera2_stream_ops_t *stream_ops, uint32_t *stream_id,
+    uint32_t *format_actual, uint32_t *usage, uint32_t *max_buffers) {
+  Mutex::Autolock l(mMutex);
 
-    if (!mStatusPresent) {
-        ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
-        return ERROR_CAMERA_NOT_PRESENT;
+  if (!mStatusPresent) {
+    ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
+    return ERROR_CAMERA_NOT_PRESENT;
+  }
+
+  // Temporary shim until FORMAT_ZSL is removed
+  if (format == CAMERA2_HAL_PIXEL_FORMAT_ZSL) {
+    format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+  }
+
+  if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+    unsigned int numFormats = sizeof(kAvailableFormats) / sizeof(uint32_t);
+    unsigned int formatIdx = 0;
+    unsigned int sizeOffsetIdx = 0;
+    for (; formatIdx < numFormats; formatIdx++) {
+      if (format == (int)kAvailableFormats[formatIdx]) break;
     }
-
-    // Temporary shim until FORMAT_ZSL is removed
-    if (format == CAMERA2_HAL_PIXEL_FORMAT_ZSL) {
-        format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+    if (formatIdx == numFormats) {
+      ALOGE("%s: Format 0x%x is not supported", __FUNCTION__, format);
+      return BAD_VALUE;
     }
+  }
 
-    if (format != HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
-        unsigned int numFormats = sizeof(kAvailableFormats) / sizeof(uint32_t);
-        unsigned int formatIdx = 0;
-        unsigned int sizeOffsetIdx = 0;
-        for (; formatIdx < numFormats; formatIdx++) {
-            if (format == (int)kAvailableFormats[formatIdx]) break;
-        }
-        if (formatIdx == numFormats) {
-            ALOGE("%s: Format 0x%x is not supported", __FUNCTION__, format);
-            return BAD_VALUE;
-        }
-    }
-
-    const uint32_t *availableSizes;
-    size_t availableSizeCount;
-    switch (format) {
+  const uint32_t *availableSizes;
+  size_t availableSizeCount;
+  switch (format) {
 #if VSOC_PLATFORM_SDK_AFTER(K)
-        case HAL_PIXEL_FORMAT_RAW16:
-            availableSizes = &mAvailableRawSizes.front();
-            availableSizeCount = mAvailableRawSizes.size();
-            break;
+    case HAL_PIXEL_FORMAT_RAW16:
+      availableSizes = &mAvailableRawSizes.front();
+      availableSizeCount = mAvailableRawSizes.size();
+      break;
 #endif
-        case HAL_PIXEL_FORMAT_BLOB:
-            availableSizes = &mAvailableJpegSizes.front();
-            availableSizeCount = mAvailableJpegSizes.size();
-            break;
-        case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
-        case HAL_PIXEL_FORMAT_RGBA_8888:
-        case HAL_PIXEL_FORMAT_YV12:
-        case HAL_PIXEL_FORMAT_YCrCb_420_SP:
-            availableSizes = &mAvailableProcessedSizes.front();
-            availableSizeCount = mAvailableProcessedSizes.size();
-            break;
-        default:
-            ALOGE("%s: Unknown format 0x%x", __FUNCTION__, format);
-            return BAD_VALUE;
-    }
+    case HAL_PIXEL_FORMAT_BLOB:
+      availableSizes = &mAvailableJpegSizes.front();
+      availableSizeCount = mAvailableJpegSizes.size();
+      break;
+    case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
+    case HAL_PIXEL_FORMAT_RGBA_8888:
+    case HAL_PIXEL_FORMAT_YV12:
+    case HAL_PIXEL_FORMAT_YCrCb_420_SP:
+      availableSizes = &mAvailableProcessedSizes.front();
+      availableSizeCount = mAvailableProcessedSizes.size();
+      break;
+    default:
+      ALOGE("%s: Unknown format 0x%x", __FUNCTION__, format);
+      return BAD_VALUE;
+  }
 
-    unsigned int resIdx = 0;
-    for (; resIdx < availableSizeCount; resIdx++) {
-        if (availableSizes[resIdx * 2] == width &&
-                availableSizes[resIdx * 2 + 1] == height) break;
-    }
-    if (resIdx == availableSizeCount) {
-        ALOGE("%s: Format 0x%x does not support resolution %d, %d", __FUNCTION__,
-                format, width, height);
-        return BAD_VALUE;
-    }
+  unsigned int resIdx = 0;
+  for (; resIdx < availableSizeCount; resIdx++) {
+    if (availableSizes[resIdx * 2] == width &&
+        availableSizes[resIdx * 2 + 1] == height)
+      break;
+  }
+  if (resIdx == availableSizeCount) {
+    ALOGE("%s: Format 0x%x does not support resolution %d, %d", __FUNCTION__,
+          format, width, height);
+    return BAD_VALUE;
+  }
 
-    switch (format) {
+  switch (format) {
 #if VSOC_PLATFORM_SDK_AFTER(K)
-        case HAL_PIXEL_FORMAT_RAW16:
-            if (mRawStreamCount >= kMaxRawStreamCount) {
-                ALOGE("%s: Cannot allocate another raw stream (%d already allocated)",
-                        __FUNCTION__, mRawStreamCount);
-                return INVALID_OPERATION;
-            }
-            mRawStreamCount++;
-            break;
+    case HAL_PIXEL_FORMAT_RAW16:
+      if (mRawStreamCount >= kMaxRawStreamCount) {
+        ALOGE("%s: Cannot allocate another raw stream (%d already allocated)",
+              __FUNCTION__, mRawStreamCount);
+        return INVALID_OPERATION;
+      }
+      mRawStreamCount++;
+      break;
 #endif
-        case HAL_PIXEL_FORMAT_BLOB:
-            if (mJpegStreamCount >= kMaxJpegStreamCount) {
-                ALOGE("%s: Cannot allocate another JPEG stream (%d already allocated)",
-                        __FUNCTION__, mJpegStreamCount);
-                return INVALID_OPERATION;
-            }
-            mJpegStreamCount++;
-            break;
-        default:
-            if (mProcessedStreamCount >= kMaxProcessedStreamCount) {
-                ALOGE("%s: Cannot allocate another processed stream (%d already allocated)",
-                        __FUNCTION__, mProcessedStreamCount);
-                return INVALID_OPERATION;
-            }
-            mProcessedStreamCount++;
-    }
+    case HAL_PIXEL_FORMAT_BLOB:
+      if (mJpegStreamCount >= kMaxJpegStreamCount) {
+        ALOGE("%s: Cannot allocate another JPEG stream (%d already allocated)",
+              __FUNCTION__, mJpegStreamCount);
+        return INVALID_OPERATION;
+      }
+      mJpegStreamCount++;
+      break;
+    default:
+      if (mProcessedStreamCount >= kMaxProcessedStreamCount) {
+        ALOGE(
+            "%s: Cannot allocate another processed stream (%d already "
+            "allocated)",
+            __FUNCTION__, mProcessedStreamCount);
+        return INVALID_OPERATION;
+      }
+      mProcessedStreamCount++;
+  }
 
-    Stream newStream;
-    newStream.ops = stream_ops;
-    newStream.width = width;
-    newStream.height = height;
-    newStream.format = format;
-    // TODO: Query stride from gralloc
-    newStream.stride = width;
+  Stream newStream;
+  newStream.ops = stream_ops;
+  newStream.width = width;
+  newStream.height = height;
+  newStream.format = format;
+  // TODO: Query stride from gralloc
+  newStream.stride = width;
 
-    mStreams.add(mNextStreamId, newStream);
+  mStreams.add(mNextStreamId, newStream);
 
-    *stream_id = mNextStreamId;
-    if (format_actual) *format_actual = format;
-    *usage = GRALLOC_USAGE_HW_CAMERA_WRITE;
-    *max_buffers = kMaxBufferCount;
+  *stream_id = mNextStreamId;
+  if (format_actual) *format_actual = format;
+  *usage = GRALLOC_USAGE_HW_CAMERA_WRITE;
+  *max_buffers = kMaxBufferCount;
 
-    ALOGV("Stream allocated: %d, %d x %d, 0x%x. U: %x, B: %d",
-            *stream_id, width, height, format, *usage, *max_buffers);
+  ALOGV("Stream allocated: %d, %d x %d, 0x%x. U: %x, B: %d", *stream_id, width,
+        height, format, *usage, *max_buffers);
 
-    mNextStreamId++;
-    return NO_ERROR;
+  mNextStreamId++;
+  return NO_ERROR;
 }
 
-int EmulatedFakeCamera2::registerStreamBuffers(
-            uint32_t stream_id,
-            int num_buffers,
-            buffer_handle_t *buffers) {
-    Mutex::Autolock l(mMutex);
+int EmulatedFakeCamera2::registerStreamBuffers(uint32_t stream_id,
+                                               int num_buffers,
+                                               buffer_handle_t *buffers) {
+  Mutex::Autolock l(mMutex);
 
-    if (!mStatusPresent) {
-        ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
-        return ERROR_CAMERA_NOT_PRESENT;
-    }
+  if (!mStatusPresent) {
+    ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
+    return ERROR_CAMERA_NOT_PRESENT;
+  }
 
-    ALOGV("%s: Stream %d registering %d buffers", __FUNCTION__,
-            stream_id, num_buffers);
-    // Need to find out what the final concrete pixel format for our stream is
-    // Assumes that all buffers have the same format.
-    if (num_buffers < 1) {
-        ALOGE("%s: Stream %d only has %d buffers!",
-                __FUNCTION__, stream_id, num_buffers);
-        return BAD_VALUE;
-    }
+  ALOGV("%s: Stream %d registering %d buffers", __FUNCTION__, stream_id,
+        num_buffers);
+  // Need to find out what the final concrete pixel format for our stream is
+  // Assumes that all buffers have the same format.
+  if (num_buffers < 1) {
+    ALOGE("%s: Stream %d only has %d buffers!", __FUNCTION__, stream_id,
+          num_buffers);
+    return BAD_VALUE;
+  }
 
-    ssize_t streamIndex = mStreams.indexOfKey(stream_id);
-    if (streamIndex < 0) {
-        ALOGE("%s: Unknown stream id %d!", __FUNCTION__, stream_id);
-        return BAD_VALUE;
-    }
+  ssize_t streamIndex = mStreams.indexOfKey(stream_id);
+  if (streamIndex < 0) {
+    ALOGE("%s: Unknown stream id %d!", __FUNCTION__, stream_id);
+    return BAD_VALUE;
+  }
 
-    Stream &stream = mStreams.editValueAt(streamIndex);
+  Stream &stream = mStreams.editValueAt(streamIndex);
 
-    int finalFormat = stream.format;
+  int finalFormat = stream.format;
 
-    if (finalFormat == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
-        finalFormat = HAL_PIXEL_FORMAT_RGBA_8888;
-    }
+  if (finalFormat == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+    finalFormat = HAL_PIXEL_FORMAT_RGBA_8888;
+  }
 
-    ALOGV("%s: Stream %d format set to %x, previously %x",
-            __FUNCTION__, stream_id, finalFormat, stream.format);
+  ALOGV("%s: Stream %d format set to %x, previously %x", __FUNCTION__,
+        stream_id, finalFormat, stream.format);
 
-    stream.format = finalFormat;
+  stream.format = finalFormat;
 
-    return NO_ERROR;
+  return NO_ERROR;
 }
 
 int EmulatedFakeCamera2::releaseStream(uint32_t stream_id) {
-    Mutex::Autolock l(mMutex);
+  Mutex::Autolock l(mMutex);
 
-    ssize_t streamIndex = mStreams.indexOfKey(stream_id);
-    if (streamIndex < 0) {
-        ALOGE("%s: Unknown stream id %d!", __FUNCTION__, stream_id);
-        return BAD_VALUE;
-    }
+  ssize_t streamIndex = mStreams.indexOfKey(stream_id);
+  if (streamIndex < 0) {
+    ALOGE("%s: Unknown stream id %d!", __FUNCTION__, stream_id);
+    return BAD_VALUE;
+  }
 
-    if (isStreamInUse(stream_id)) {
-        ALOGE("%s: Cannot release stream %d; in use!", __FUNCTION__,
-                stream_id);
-        return BAD_VALUE;
-    }
+  if (isStreamInUse(stream_id)) {
+    ALOGE("%s: Cannot release stream %d; in use!", __FUNCTION__, stream_id);
+    return BAD_VALUE;
+  }
 
-    switch(mStreams.valueAt(streamIndex).format) {
+  switch (mStreams.valueAt(streamIndex).format) {
 #if VSOC_PLATFORM_SDK_AFTER(K)
-        case HAL_PIXEL_FORMAT_RAW16:
-            mRawStreamCount--;
-            break;
+    case HAL_PIXEL_FORMAT_RAW16:
+      mRawStreamCount--;
+      break;
 #endif
-        case HAL_PIXEL_FORMAT_BLOB:
-            mJpegStreamCount--;
-            break;
-        default:
-            mProcessedStreamCount--;
-            break;
-    }
+    case HAL_PIXEL_FORMAT_BLOB:
+      mJpegStreamCount--;
+      break;
+    default:
+      mProcessedStreamCount--;
+      break;
+  }
 
-    mStreams.removeItemsAt(streamIndex);
+  mStreams.removeItemsAt(streamIndex);
 
-    return NO_ERROR;
+  return NO_ERROR;
 }
 
 int EmulatedFakeCamera2::allocateReprocessStreamFromStream(
-        uint32_t output_stream_id,
-        const camera2_stream_in_ops_t *stream_ops,
-        uint32_t *stream_id) {
-    Mutex::Autolock l(mMutex);
+    uint32_t output_stream_id, const camera2_stream_in_ops_t *stream_ops,
+    uint32_t *stream_id) {
+  Mutex::Autolock l(mMutex);
 
-    if (!mStatusPresent) {
-        ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
-        return ERROR_CAMERA_NOT_PRESENT;
-    }
+  if (!mStatusPresent) {
+    ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
+    return ERROR_CAMERA_NOT_PRESENT;
+  }
 
-    ssize_t baseStreamIndex = mStreams.indexOfKey(output_stream_id);
-    if (baseStreamIndex < 0) {
-        ALOGE("%s: Unknown output stream id %d!", __FUNCTION__, output_stream_id);
-        return BAD_VALUE;
-    }
+  ssize_t baseStreamIndex = mStreams.indexOfKey(output_stream_id);
+  if (baseStreamIndex < 0) {
+    ALOGE("%s: Unknown output stream id %d!", __FUNCTION__, output_stream_id);
+    return BAD_VALUE;
+  }
 
-    const Stream &baseStream = mStreams[baseStreamIndex];
+  const Stream &baseStream = mStreams[baseStreamIndex];
 
-    // We'll reprocess anything we produced
+  // We'll reprocess anything we produced
 
-    if (mReprocessStreamCount >= kMaxReprocessStreamCount) {
-        ALOGE("%s: Cannot allocate another reprocess stream (%d already allocated)",
-                __FUNCTION__, mReprocessStreamCount);
-        return INVALID_OPERATION;
-    }
-    mReprocessStreamCount++;
+  if (mReprocessStreamCount >= kMaxReprocessStreamCount) {
+    ALOGE("%s: Cannot allocate another reprocess stream (%d already allocated)",
+          __FUNCTION__, mReprocessStreamCount);
+    return INVALID_OPERATION;
+  }
+  mReprocessStreamCount++;
 
-    ReprocessStream newStream;
-    newStream.ops = stream_ops;
-    newStream.width = baseStream.width;
-    newStream.height = baseStream.height;
-    newStream.format = baseStream.format;
-    newStream.stride = baseStream.stride;
-    newStream.sourceStreamId = output_stream_id;
+  ReprocessStream newStream;
+  newStream.ops = stream_ops;
+  newStream.width = baseStream.width;
+  newStream.height = baseStream.height;
+  newStream.format = baseStream.format;
+  newStream.stride = baseStream.stride;
+  newStream.sourceStreamId = output_stream_id;
 
-    *stream_id = mNextReprocessStreamId;
-    mReprocessStreams.add(mNextReprocessStreamId, newStream);
+  *stream_id = mNextReprocessStreamId;
+  mReprocessStreams.add(mNextReprocessStreamId, newStream);
 
-    ALOGV("Reprocess stream allocated: %d: %d, %d, 0x%x. Parent stream: %d",
-            *stream_id, newStream.width, newStream.height, newStream.format,
-            output_stream_id);
+  ALOGV("Reprocess stream allocated: %d: %d, %d, 0x%x. Parent stream: %d",
+        *stream_id, newStream.width, newStream.height, newStream.format,
+        output_stream_id);
 
-    mNextReprocessStreamId++;
-    return NO_ERROR;
+  mNextReprocessStreamId++;
+  return NO_ERROR;
 }
 
 int EmulatedFakeCamera2::releaseReprocessStream(uint32_t stream_id) {
-    Mutex::Autolock l(mMutex);
+  Mutex::Autolock l(mMutex);
 
-    ssize_t streamIndex = mReprocessStreams.indexOfKey(stream_id);
-    if (streamIndex < 0) {
-        ALOGE("%s: Unknown reprocess stream id %d!", __FUNCTION__, stream_id);
-        return BAD_VALUE;
-    }
+  ssize_t streamIndex = mReprocessStreams.indexOfKey(stream_id);
+  if (streamIndex < 0) {
+    ALOGE("%s: Unknown reprocess stream id %d!", __FUNCTION__, stream_id);
+    return BAD_VALUE;
+  }
 
-    if (isReprocessStreamInUse(stream_id)) {
-        ALOGE("%s: Cannot release reprocessing stream %d; in use!", __FUNCTION__,
-                stream_id);
-        return BAD_VALUE;
-    }
+  if (isReprocessStreamInUse(stream_id)) {
+    ALOGE("%s: Cannot release reprocessing stream %d; in use!", __FUNCTION__,
+          stream_id);
+    return BAD_VALUE;
+  }
 
-    mReprocessStreamCount--;
-    mReprocessStreams.removeItemsAt(streamIndex);
+  mReprocessStreamCount--;
+  mReprocessStreams.removeItemsAt(streamIndex);
 
-    return NO_ERROR;
+  return NO_ERROR;
 }
 
-int EmulatedFakeCamera2::triggerAction(uint32_t trigger_id,
-        int32_t ext1,
-        int32_t ext2) {
-    Mutex::Autolock l(mMutex);
+int EmulatedFakeCamera2::triggerAction(uint32_t trigger_id, int32_t ext1,
+                                       int32_t ext2) {
+  Mutex::Autolock l(mMutex);
 
-    if (trigger_id == CAMERA2_EXT_TRIGGER_TESTING_DISCONNECT) {
-        ALOGI("%s: Disconnect trigger - camera must be closed", __FUNCTION__);
-        mStatusPresent = false;
+  if (trigger_id == CAMERA2_EXT_TRIGGER_TESTING_DISCONNECT) {
+    ALOGI("%s: Disconnect trigger - camera must be closed", __FUNCTION__);
+    mStatusPresent = false;
 
-        EmulatedCameraFactory::Instance().onStatusChanged(
-                mCameraID,
-                CAMERA_DEVICE_STATUS_NOT_PRESENT);
-    }
+    EmulatedCameraFactory::Instance().onStatusChanged(
+        mCameraID, CAMERA_DEVICE_STATUS_NOT_PRESENT);
+  }
 
-    if (!mStatusPresent) {
-        ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
-        return ERROR_CAMERA_NOT_PRESENT;
-    }
+  if (!mStatusPresent) {
+    ALOGW("%s: Camera was physically disconnected", __FUNCTION__);
+    return ERROR_CAMERA_NOT_PRESENT;
+  }
 
-    return mControlThread->triggerAction(trigger_id,
-            ext1, ext2);
+  return mControlThread->triggerAction(trigger_id, ext1, ext2);
 }
 
 /** Shutdown and debug methods */
 
 int EmulatedFakeCamera2::dump(int fd) {
-    String8 result;
+  String8 result;
 
-    result.appendFormat("    Camera HAL device: EmulatedFakeCamera2\n");
-    result.appendFormat("      Streams:\n");
-    for (size_t i = 0; i < mStreams.size(); i++) {
-        int id = mStreams.keyAt(i);
-        const Stream& s = mStreams.valueAt(i);
-        result.appendFormat(
-            "         Stream %d: %d x %d, format 0x%x, stride %d\n",
-            id, s.width, s.height, s.format, s.stride);
-    }
+  result.appendFormat("    Camera HAL device: EmulatedFakeCamera2\n");
+  result.appendFormat("      Streams:\n");
+  for (size_t i = 0; i < mStreams.size(); i++) {
+    int id = mStreams.keyAt(i);
+    const Stream &s = mStreams.valueAt(i);
+    result.appendFormat("         Stream %d: %d x %d, format 0x%x, stride %d\n",
+                        id, s.width, s.height, s.format, s.stride);
+  }
 
-    write(fd, result.string(), result.size());
+  write(fd, result.string(), result.size());
 
-    return NO_ERROR;
+  return NO_ERROR;
 }
 
 void EmulatedFakeCamera2::signalError() {
-    // TODO: Let parent know so we can shut down cleanly
-    ALOGE("Worker thread is signaling a serious error");
+  // TODO: Let parent know so we can shut down cleanly
+  ALOGE("Worker thread is signaling a serious error");
 }
 
 /** Pipeline control worker thread methods */
 
-EmulatedFakeCamera2::ConfigureThread::ConfigureThread(EmulatedFakeCamera2 *parent):
-        Thread(false),
-        mParent(parent),
-        mRequestCount(0),
-        mNextBuffers(NULL) {
-    mRunning = false;
+EmulatedFakeCamera2::ConfigureThread::ConfigureThread(
+    EmulatedFakeCamera2 *parent)
+    : Thread(false), mParent(parent), mRequestCount(0), mNextBuffers(NULL) {
+  mRunning = false;
 }
 
-EmulatedFakeCamera2::ConfigureThread::~ConfigureThread() {
-}
+EmulatedFakeCamera2::ConfigureThread::~ConfigureThread() {}
 
 status_t EmulatedFakeCamera2::ConfigureThread::readyToRun() {
-    Mutex::Autolock lock(mInputMutex);
+  Mutex::Autolock lock(mInputMutex);
 
-    ALOGV("Starting up ConfigureThread");
-    mRequest = NULL;
-    mActive  = false;
-    mRunning = true;
+  ALOGV("Starting up ConfigureThread");
+  mRequest = NULL;
+  mActive = false;
+  mRunning = true;
 
-    mInputSignal.signal();
-    return NO_ERROR;
+  mInputSignal.signal();
+  return NO_ERROR;
 }
 
 status_t EmulatedFakeCamera2::ConfigureThread::waitUntilRunning() {
-    Mutex::Autolock lock(mInputMutex);
-    if (!mRunning) {
-        ALOGV("Waiting for configure thread to start");
-        mInputSignal.wait(mInputMutex);
-    }
-    return OK;
+  Mutex::Autolock lock(mInputMutex);
+  if (!mRunning) {
+    ALOGV("Waiting for configure thread to start");
+    mInputSignal.wait(mInputMutex);
+  }
+  return OK;
 }
 
 status_t EmulatedFakeCamera2::ConfigureThread::newRequestAvailable() {
-    waitUntilRunning();
+  waitUntilRunning();
 
-    Mutex::Autolock lock(mInputMutex);
+  Mutex::Autolock lock(mInputMutex);
 
-    mActive = true;
-    mInputSignal.signal();
+  mActive = true;
+  mInputSignal.signal();
 
-    return OK;
+  return OK;
 }
 
 bool EmulatedFakeCamera2::ConfigureThread::isStreamInUse(uint32_t id) {
-    Mutex::Autolock lock(mInternalsMutex);
+  Mutex::Autolock lock(mInternalsMutex);
 
-    if (mNextBuffers == NULL) return false;
-    for (size_t i=0; i < mNextBuffers->size(); i++) {
-        if ((*mNextBuffers)[i].streamId == (int)id) return true;
-    }
-    return false;
+  if (mNextBuffers == NULL) return false;
+  for (size_t i = 0; i < mNextBuffers->size(); i++) {
+    if ((*mNextBuffers)[i].streamId == (int)id) return true;
+  }
+  return false;
 }
 
 int EmulatedFakeCamera2::ConfigureThread::getInProgressCount() {
-    Mutex::Autolock lock(mInputMutex);
-    return mRequestCount;
+  Mutex::Autolock lock(mInputMutex);
+  return mRequestCount;
 }
 
 bool EmulatedFakeCamera2::ConfigureThread::threadLoop() {
-    status_t res;
+  status_t res;
 
-    // Check if we're currently processing or just waiting
-    {
-        Mutex::Autolock lock(mInputMutex);
-        if (!mActive) {
-            // Inactive, keep waiting until we've been signaled
-            status_t res;
-            res = mInputSignal.waitRelative(mInputMutex, kWaitPerLoop);
-            if (res != NO_ERROR && res != TIMED_OUT) {
-                ALOGE("%s: Error waiting for input requests: %d",
-                        __FUNCTION__, res);
-                return false;
-            }
-            if (!mActive) return true;
-            ALOGV("New request available");
-        }
-        // Active
+  // Check if we're currently processing or just waiting
+  {
+    Mutex::Autolock lock(mInputMutex);
+    if (!mActive) {
+      // Inactive, keep waiting until we've been signaled
+      status_t res;
+      res = mInputSignal.waitRelative(mInputMutex, kWaitPerLoop);
+      if (res != NO_ERROR && res != TIMED_OUT) {
+        ALOGE("%s: Error waiting for input requests: %d", __FUNCTION__, res);
+        return false;
+      }
+      if (!mActive) return true;
+      ALOGV("New request available");
     }
+    // Active
+  }
 
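+  // No request is currently in flight through configuration: dequeue the next
+  // one from the framework's request queue and dispatch on its type.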
+  if (mRequest == NULL) {
+    Mutex::Autolock il(mInternalsMutex);
+
+    ALOGV("Configure: Getting next request");
+    res = mParent->mRequestQueueSrc->dequeue_request(mParent->mRequestQueueSrc,
+                                                     &mRequest);
+    if (res != NO_ERROR) {
+      ALOGE("%s: Error dequeuing next request: %d", __FUNCTION__, res);
+      mParent->signalError();
+      return false;
+    }
     if (mRequest == NULL) {
-        Mutex::Autolock il(mInternalsMutex);
-
-        ALOGV("Configure: Getting next request");
-        res = mParent->mRequestQueueSrc->dequeue_request(
-            mParent->mRequestQueueSrc,
-            &mRequest);
-        if (res != NO_ERROR) {
-            ALOGE("%s: Error dequeuing next request: %d", __FUNCTION__, res);
-            mParent->signalError();
-            return false;
-        }
-        if (mRequest == NULL) {
-            ALOGV("Configure: Request queue empty, going inactive");
-            // No requests available, go into inactive mode
-            Mutex::Autolock lock(mInputMutex);
-            mActive = false;
-            return true;
-        } else {
-            Mutex::Autolock lock(mInputMutex);
-            mRequestCount++;
-        }
-
-        camera_metadata_entry_t type;
-        res = find_camera_metadata_entry(mRequest,
-                ANDROID_REQUEST_TYPE,
-                &type);
-        if (res != NO_ERROR) {
-            ALOGE("%s: error reading request type", __FUNCTION__);
-            mParent->signalError();
-            return false;
-        }
-        bool success = false;;
-        switch (type.data.u8[0]) {
-            case ANDROID_REQUEST_TYPE_CAPTURE:
-                success = setupCapture();
-                break;
-            case ANDROID_REQUEST_TYPE_REPROCESS:
-                success = setupReprocess();
-                break;
-            default:
-                ALOGE("%s: Unexpected request type %d",
-                        __FUNCTION__, type.data.u8[0]);
-                mParent->signalError();
-                break;
-        }
-        if (!success) return false;
-
+      ALOGV("Configure: Request queue empty, going inactive");
+      // No requests available, go into inactive mode
+      Mutex::Autolock lock(mInputMutex);
+      mActive = false;
+      return true;
+    } else {
+      Mutex::Autolock lock(mInputMutex);
+      mRequestCount++;
     }
 
-    if (mWaitingForReadout) {
-        bool readoutDone;
-        readoutDone = mParent->mReadoutThread->waitForReady(kWaitPerLoop);
-        if (!readoutDone) return true;
-
-        if (mNextNeedsJpeg) {
-            ALOGV("Configure: Waiting for JPEG compressor");
-        } else {
-            ALOGV("Configure: Waiting for sensor");
-        }
-        mWaitingForReadout = false;
+    camera_metadata_entry_t type;
+    res = find_camera_metadata_entry(mRequest, ANDROID_REQUEST_TYPE, &type);
+    if (res != NO_ERROR) {
+      ALOGE("%s: error reading request type", __FUNCTION__);
+      mParent->signalError();
+      return false;
     }
+    bool success = false;
+    switch (type.data.u8[0]) {
+      case ANDROID_REQUEST_TYPE_CAPTURE:
+        success = setupCapture();
+        break;
+      case ANDROID_REQUEST_TYPE_REPROCESS:
+        success = setupReprocess();
+        break;
+      default:
+        ALOGE("%s: Unexpected request type %d", __FUNCTION__, type.data.u8[0]);
+        mParent->signalError();
+        break;
+    }
+    if (!success) return false;
+  }
+
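+  // Back-pressure: hold off reprogramming the sensor until the readout thread
+  // (and the JPEG compressor, when the previous capture had a BLOB stream)
+  // has caught up.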
+  if (mWaitingForReadout) {
+    bool readoutDone;
+    readoutDone = mParent->mReadoutThread->waitForReady(kWaitPerLoop);
+    if (!readoutDone) return true;
 
     if (mNextNeedsJpeg) {
-        bool jpegDone;
-        jpegDone = mParent->mJpegCompressor->waitForDone(kWaitPerLoop);
-        if (!jpegDone) return true;
-
-        ALOGV("Configure: Waiting for sensor");
-        mNextNeedsJpeg = false;
-    }
-
-    if (mNextIsCapture) {
-        return configureNextCapture();
+      ALOGV("Configure: Waiting for JPEG compressor");
     } else {
-        return configureNextReprocess();
+      ALOGV("Configure: Waiting for sensor");
     }
+    mWaitingForReadout = false;
+  }
+
+  if (mNextNeedsJpeg) {
+    bool jpegDone;
+    jpegDone = mParent->mJpegCompressor->waitForDone(kWaitPerLoop);
+    if (!jpegDone) return true;
+
+    ALOGV("Configure: Waiting for sensor");
+    mNextNeedsJpeg = false;
+  }
+
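+  // Finish configuration along the path chosen in setupCapture() or
+  // setupReprocess().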
+  if (mNextIsCapture) {
+    return configureNextCapture();
+  } else {
+    return configureNextReprocess();
+  }
 }
 
 bool EmulatedFakeCamera2::ConfigureThread::setupCapture() {
-    status_t res;
+  status_t res;
 
-    mNextIsCapture = true;
-    // Get necessary parameters for sensor config
-    mParent->mControlThread->processRequest(mRequest);
+  mNextIsCapture = true;
+  // Get necessary parameters for sensor config
+  mParent->mControlThread->processRequest(mRequest);
 
-    camera_metadata_entry_t streams;
-    res = find_camera_metadata_entry(mRequest,
-            ANDROID_REQUEST_OUTPUT_STREAMS,
-            &streams);
-    if (res != NO_ERROR) {
-        ALOGE("%s: error reading output stream tag", __FUNCTION__);
-        mParent->signalError();
-        return false;
+  camera_metadata_entry_t streams;
+  res = find_camera_metadata_entry(mRequest, ANDROID_REQUEST_OUTPUT_STREAMS,
+                                   &streams);
+  if (res != NO_ERROR) {
+    ALOGE("%s: error reading output stream tag", __FUNCTION__);
+    mParent->signalError();
+    return false;
+  }
+
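+  // Build one destination buffer descriptor per requested output stream; a
+  // BLOB-format stream means this capture also needs the JPEG compressor.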
+  mNextBuffers = new Buffers;
+  mNextNeedsJpeg = false;
+  ALOGV("Configure: Setting up buffers for capture");
+  for (size_t i = 0; i < streams.count; i++) {
+    int streamId = streams.data.i32[i];
+    const Stream &s = mParent->getStreamInfo(streamId);
+    if (s.format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+      ALOGE(
+          "%s: Stream %d does not have a concrete pixel format, but "
+          "is included in a request!",
+          __FUNCTION__, streamId);
+      mParent->signalError();
+      return false;
     }
-
-    mNextBuffers = new Buffers;
-    mNextNeedsJpeg = false;
-    ALOGV("Configure: Setting up buffers for capture");
-    for (size_t i = 0; i < streams.count; i++) {
-        int streamId = streams.data.i32[i];
-        const Stream &s = mParent->getStreamInfo(streamId);
-        if (s.format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
-            ALOGE("%s: Stream %d does not have a concrete pixel format, but "
-                    "is included in a request!", __FUNCTION__, streamId);
-            mParent->signalError();
-            return false;
-        }
-        StreamBuffer b;
-        b.streamId = streamId; //streams.data.u8[i];
-        b.width  = s.width;
-        b.height = s.height;
-        b.format = s.format;
-        b.stride = s.stride;
-        mNextBuffers->push_back(b);
-        ALOGV("Configure:    Buffer %zu: Stream %d, %d x %d, format 0x%x, "
-                "stride %d",
-                i, b.streamId, b.width, b.height, b.format, b.stride);
-        if (b.format == HAL_PIXEL_FORMAT_BLOB) {
-            mNextNeedsJpeg = true;
-        }
+    StreamBuffer b;
+    b.streamId = streamId;  // streams.data.u8[i];
+    b.width = s.width;
+    b.height = s.height;
+    b.format = s.format;
+    b.stride = s.stride;
+    mNextBuffers->push_back(b);
+    ALOGV(
+        "Configure:    Buffer %zu: Stream %d, %d x %d, format 0x%x, "
+        "stride %d",
+        i, b.streamId, b.width, b.height, b.format, b.stride);
+    if (b.format == HAL_PIXEL_FORMAT_BLOB) {
+      mNextNeedsJpeg = true;
     }
+  }
 
-    camera_metadata_entry_t e;
-    res = find_camera_metadata_entry(mRequest,
-            ANDROID_REQUEST_FRAME_COUNT,
-            &e);
-    if (res != NO_ERROR) {
-        ALOGE("%s: error reading frame count tag: %s (%d)",
-                __FUNCTION__, strerror(-res), res);
-        mParent->signalError();
-        return false;
-    }
-    mNextFrameNumber = *e.data.i32;
+  camera_metadata_entry_t e;
+  res = find_camera_metadata_entry(mRequest, ANDROID_REQUEST_FRAME_COUNT, &e);
+  if (res != NO_ERROR) {
+    ALOGE("%s: error reading frame count tag: %s (%d)", __FUNCTION__,
+          strerror(-res), res);
+    mParent->signalError();
+    return false;
+  }
+  mNextFrameNumber = *e.data.i32;
 
-    res = find_camera_metadata_entry(mRequest,
-            ANDROID_SENSOR_EXPOSURE_TIME,
-            &e);
-    if (res != NO_ERROR) {
-        ALOGE("%s: error reading exposure time tag: %s (%d)",
-                __FUNCTION__, strerror(-res), res);
-        mParent->signalError();
-        return false;
-    }
-    mNextExposureTime = *e.data.i64;
+  res = find_camera_metadata_entry(mRequest, ANDROID_SENSOR_EXPOSURE_TIME, &e);
+  if (res != NO_ERROR) {
+    ALOGE("%s: error reading exposure time tag: %s (%d)", __FUNCTION__,
+          strerror(-res), res);
+    mParent->signalError();
+    return false;
+  }
+  mNextExposureTime = *e.data.i64;
 
-    res = find_camera_metadata_entry(mRequest,
-            ANDROID_SENSOR_FRAME_DURATION,
-            &e);
-    if (res != NO_ERROR) {
-        ALOGE("%s: error reading frame duration tag", __FUNCTION__);
-        mParent->signalError();
-        return false;
-    }
-    mNextFrameDuration = *e.data.i64;
+  res = find_camera_metadata_entry(mRequest, ANDROID_SENSOR_FRAME_DURATION, &e);
+  if (res != NO_ERROR) {
+    ALOGE("%s: error reading frame duration tag", __FUNCTION__);
+    mParent->signalError();
+    return false;
+  }
+  mNextFrameDuration = *e.data.i64;
 
-    if (mNextFrameDuration <
-            mNextExposureTime + Sensor::kMinVerticalBlank) {
-        mNextFrameDuration = mNextExposureTime + Sensor::kMinVerticalBlank;
-    }
-    res = find_camera_metadata_entry(mRequest,
-            ANDROID_SENSOR_SENSITIVITY,
-            &e);
-    if (res != NO_ERROR) {
-        ALOGE("%s: error reading sensitivity tag", __FUNCTION__);
-        mParent->signalError();
-        return false;
-    }
-    mNextSensitivity = *e.data.i32;
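+  // A frame cannot be shorter than its exposure time plus the sensor's
+  // minimum vertical blanking interval.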
+  if (mNextFrameDuration < mNextExposureTime + Sensor::kMinVerticalBlank) {
+    mNextFrameDuration = mNextExposureTime + Sensor::kMinVerticalBlank;
+  }
+  res = find_camera_metadata_entry(mRequest, ANDROID_SENSOR_SENSITIVITY, &e);
+  if (res != NO_ERROR) {
+    ALOGE("%s: error reading sensitivity tag", __FUNCTION__);
+    mParent->signalError();
+    return false;
+  }
+  mNextSensitivity = *e.data.i32;
 
-    // Start waiting on readout thread
-    mWaitingForReadout = true;
-    ALOGV("Configure: Waiting for readout thread");
+  // Start waiting on readout thread
+  mWaitingForReadout = true;
+  ALOGV("Configure: Waiting for readout thread");
 
-    return true;
+  return true;
 }
 
 bool EmulatedFakeCamera2::ConfigureThread::configureNextCapture() {
-    bool vsync = mParent->mSensor->waitForVSync(kWaitPerLoop);
-    if (!vsync) return true;
+  bool vsync = mParent->mSensor->waitForVSync(kWaitPerLoop);
+  if (!vsync) return true;
 
-    Mutex::Autolock il(mInternalsMutex);
-    ALOGV("Configure: Configuring sensor for capture %d", mNextFrameNumber);
-    mParent->mSensor->setExposureTime(mNextExposureTime);
-    mParent->mSensor->setFrameDuration(mNextFrameDuration);
-    mParent->mSensor->setSensitivity(mNextSensitivity);
+  Mutex::Autolock il(mInternalsMutex);
+  ALOGV("Configure: Configuring sensor for capture %d", mNextFrameNumber);
+  mParent->mSensor->setExposureTime(mNextExposureTime);
+  mParent->mSensor->setFrameDuration(mNextFrameDuration);
+  mParent->mSensor->setSensitivity(mNextSensitivity);
 
-    getBuffers();
+  getBuffers();
 
-    ALOGV("Configure: Done configure for capture %d", mNextFrameNumber);
-    mParent->mReadoutThread->setNextOperation(true, mRequest, mNextBuffers);
-    mParent->mSensor->setDestinationBuffers(mNextBuffers);
+  ALOGV("Configure: Done configure for capture %d", mNextFrameNumber);
+  mParent->mReadoutThread->setNextOperation(true, mRequest, mNextBuffers);
+  mParent->mSensor->setDestinationBuffers(mNextBuffers);
 
-    mRequest = NULL;
-    mNextBuffers = NULL;
+  mRequest = NULL;
+  mNextBuffers = NULL;
 
-    Mutex::Autolock lock(mInputMutex);
-    mRequestCount--;
+  Mutex::Autolock lock(mInputMutex);
+  mRequestCount--;
 
-    return true;
+  return true;
 }
 
 bool EmulatedFakeCamera2::ConfigureThread::setupReprocess() {
-    status_t res;
+  status_t res;
 
-    mNextNeedsJpeg = true;
-    mNextIsCapture = false;
+  mNextNeedsJpeg = true;
+  mNextIsCapture = false;
 
-    camera_metadata_entry_t reprocessStreams;
-    res = find_camera_metadata_entry(mRequest,
-            ANDROID_REQUEST_INPUT_STREAMS,
-            &reprocessStreams);
-    if (res != NO_ERROR) {
-        ALOGE("%s: error reading output stream tag", __FUNCTION__);
-        mParent->signalError();
-        return false;
+  camera_metadata_entry_t reprocessStreams;
+  res = find_camera_metadata_entry(mRequest, ANDROID_REQUEST_INPUT_STREAMS,
+                                   &reprocessStreams);
+  if (res != NO_ERROR) {
+    ALOGE("%s: error reading input stream tag", __FUNCTION__);
+    mParent->signalError();
+    return false;
+  }
+
+  mNextBuffers = new Buffers;
+
+  ALOGV("Configure: Setting up input buffers for reprocess");
+  for (size_t i = 0; i < reprocessStreams.count; i++) {
+    int streamId = reprocessStreams.data.i32[i];
+    const ReprocessStream &s = mParent->getReprocessStreamInfo(streamId);
+    if (s.format != HAL_PIXEL_FORMAT_RGB_888) {
+      ALOGE("%s: Only ZSL reprocessing supported!", __FUNCTION__);
+      mParent->signalError();
+      return false;
     }
+    StreamBuffer b;
+    b.streamId = -streamId;
+    b.width = s.width;
+    b.height = s.height;
+    b.format = s.format;
+    b.stride = s.stride;
+    mNextBuffers->push_back(b);
+  }
 
-    mNextBuffers = new Buffers;
+  camera_metadata_entry_t streams;
+  res = find_camera_metadata_entry(mRequest, ANDROID_REQUEST_OUTPUT_STREAMS,
+                                   &streams);
+  if (res != NO_ERROR) {
+    ALOGE("%s: error reading output stream tag", __FUNCTION__);
+    mParent->signalError();
+    return false;
+  }
 
-    ALOGV("Configure: Setting up input buffers for reprocess");
-    for (size_t i = 0; i < reprocessStreams.count; i++) {
-        int streamId = reprocessStreams.data.i32[i];
-        const ReprocessStream &s = mParent->getReprocessStreamInfo(streamId);
-        if (s.format != HAL_PIXEL_FORMAT_RGB_888) {
-            ALOGE("%s: Only ZSL reprocessing supported!",
-                    __FUNCTION__);
-            mParent->signalError();
-            return false;
-        }
-        StreamBuffer b;
-        b.streamId = -streamId;
-        b.width = s.width;
-        b.height = s.height;
-        b.format = s.format;
-        b.stride = s.stride;
-        mNextBuffers->push_back(b);
+  ALOGV("Configure: Setting up output buffers for reprocess");
+  for (size_t i = 0; i < streams.count; i++) {
+    int streamId = streams.data.i32[i];
+    const Stream &s = mParent->getStreamInfo(streamId);
+    if (s.format != HAL_PIXEL_FORMAT_BLOB) {
+      // TODO: Support reprocess to YUV
+      ALOGE("%s: Non-JPEG output stream %d for reprocess not supported",
+            __FUNCTION__, streamId);
+      mParent->signalError();
+      return false;
     }
+    StreamBuffer b;
+    b.streamId = streamId;
+    b.width = s.width;
+    b.height = s.height;
+    b.format = s.format;
+    b.stride = s.stride;
+    mNextBuffers->push_back(b);
+    ALOGV(
+        "Configure:    Buffer %zu: Stream %d, %d x %d, format 0x%x, "
+        "stride %d",
+        i, b.streamId, b.width, b.height, b.format, b.stride);
+  }
 
-    camera_metadata_entry_t streams;
-    res = find_camera_metadata_entry(mRequest,
-            ANDROID_REQUEST_OUTPUT_STREAMS,
-            &streams);
-    if (res != NO_ERROR) {
-        ALOGE("%s: error reading output stream tag", __FUNCTION__);
-        mParent->signalError();
-        return false;
-    }
+  camera_metadata_entry_t e;
+  res = find_camera_metadata_entry(mRequest, ANDROID_REQUEST_FRAME_COUNT, &e);
+  if (res != NO_ERROR) {
+    ALOGE("%s: error reading frame count tag: %s (%d)", __FUNCTION__,
+          strerror(-res), res);
+    mParent->signalError();
+    return false;
+  }
+  mNextFrameNumber = *e.data.i32;
 
-    ALOGV("Configure: Setting up output buffers for reprocess");
-    for (size_t i = 0; i < streams.count; i++) {
-        int streamId = streams.data.i32[i];
-        const Stream &s = mParent->getStreamInfo(streamId);
-        if (s.format != HAL_PIXEL_FORMAT_BLOB) {
-            // TODO: Support reprocess to YUV
-            ALOGE("%s: Non-JPEG output stream %d for reprocess not supported",
-                    __FUNCTION__, streamId);
-            mParent->signalError();
-            return false;
-        }
-        StreamBuffer b;
-        b.streamId = streams.data.u8[i];
-        b.width  = s.width;
-        b.height = s.height;
-        b.format = s.format;
-        b.stride = s.stride;
-        mNextBuffers->push_back(b);
-        ALOGV("Configure:    Buffer %zu: Stream %d, %d x %d, format 0x%x, "
-                "stride %d",
-                i, b.streamId, b.width, b.height, b.format, b.stride);
-    }
-
-    camera_metadata_entry_t e;
-    res = find_camera_metadata_entry(mRequest,
-            ANDROID_REQUEST_FRAME_COUNT,
-            &e);
-    if (res != NO_ERROR) {
-        ALOGE("%s: error reading frame count tag: %s (%d)",
-                __FUNCTION__, strerror(-res), res);
-        mParent->signalError();
-        return false;
-    }
-    mNextFrameNumber = *e.data.i32;
-
-    return true;
+  return true;
 }
 
 bool EmulatedFakeCamera2::ConfigureThread::configureNextReprocess() {
-    Mutex::Autolock il(mInternalsMutex);
+  Mutex::Autolock il(mInternalsMutex);
 
-    getBuffers();
+  getBuffers();
 
-    ALOGV("Configure: Done configure for reprocess %d", mNextFrameNumber);
-    mParent->mReadoutThread->setNextOperation(false, mRequest, mNextBuffers);
+  ALOGV("Configure: Done configure for reprocess %d", mNextFrameNumber);
+  mParent->mReadoutThread->setNextOperation(false, mRequest, mNextBuffers);
 
-    mRequest = NULL;
-    mNextBuffers = NULL;
+  mRequest = NULL;
+  mNextBuffers = NULL;
 
-    Mutex::Autolock lock(mInputMutex);
-    mRequestCount--;
+  Mutex::Autolock lock(mInputMutex);
+  mRequestCount--;
 
-    return true;
+  return true;
 }
 
 bool EmulatedFakeCamera2::ConfigureThread::getBuffers() {
-    status_t res;
-    /** Get buffers to fill for this frame */
-    for (size_t i = 0; i < mNextBuffers->size(); i++) {
-        StreamBuffer &b = mNextBuffers->editItemAt(i);
+  status_t res;
+  /** Get buffers to fill for this frame */
+  for (size_t i = 0; i < mNextBuffers->size(); i++) {
+    StreamBuffer &b = mNextBuffers->editItemAt(i);
 
-        if (b.streamId > 0) {
-            ALOGV("Configure: Dequeing buffer from stream %d", b.streamId);
-            Stream s = mParent->getStreamInfo(b.streamId);
-            res = s.ops->dequeue_buffer(s.ops, &(b.buffer) );
-            if (res != NO_ERROR || b.buffer == NULL) {
-                ALOGE("%s: Unable to dequeue buffer from stream %d: %s (%d)",
-                        __FUNCTION__, b.streamId, strerror(-res), res);
-                mParent->signalError();
-                return false;
-            }
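+    // Positive ids name output streams (dequeue a buffer to fill); negative
+    // ids, assigned in setupReprocess(), name reprocess inputs to acquire.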
+    if (b.streamId > 0) {
+      ALOGV("Configure: Dequeuing buffer from stream %d", b.streamId);
+      Stream s = mParent->getStreamInfo(b.streamId);
+      res = s.ops->dequeue_buffer(s.ops, &(b.buffer));
+      if (res != NO_ERROR || b.buffer == NULL) {
+        ALOGE("%s: Unable to dequeue buffer from stream %d: %s (%d)",
+              __FUNCTION__, b.streamId, strerror(-res), res);
+        mParent->signalError();
+        return false;
+      }
 
-            /* Lock the buffer from the perspective of the graphics mapper */
-            res = GrallocModule::getInstance().lock(*(b.buffer),
-                    GRALLOC_USAGE_HW_CAMERA_WRITE,
-                    0, 0, s.width, s.height,
-                    (void**)&(b.img));
+      /* Lock the buffer from the perspective of the graphics mapper */
+      res = GrallocModule::getInstance().lock(
+          *(b.buffer), GRALLOC_USAGE_HW_CAMERA_WRITE, 0, 0, s.width, s.height,
+          (void **)&(b.img));
 
+      if (res != NO_ERROR) {
+        ALOGE("%s: grbuffer_mapper.lock failure: %s (%d)", __FUNCTION__,
+              strerror(-res), res);
+        s.ops->cancel_buffer(s.ops, b.buffer);
+        mParent->signalError();
+        return false;
+      }
+    } else {
+      ALOGV("Configure: Acquiring buffer from reprocess stream %d",
+            -b.streamId);
+      ReprocessStream s = mParent->getReprocessStreamInfo(-b.streamId);
+      res = s.ops->acquire_buffer(s.ops, &(b.buffer));
+      if (res != NO_ERROR || b.buffer == NULL) {
+        ALOGE(
+            "%s: Unable to acquire buffer from reprocess stream %d: "
+            "%s (%d)",
+            __FUNCTION__, -b.streamId, strerror(-res), res);
+        mParent->signalError();
+        return false;
+      }
 
-            if (res != NO_ERROR) {
-                ALOGE("%s: grbuffer_mapper.lock failure: %s (%d)",
-                        __FUNCTION__, strerror(-res), res);
-                s.ops->cancel_buffer(s.ops,
-                        b.buffer);
-                mParent->signalError();
-                return false;
-            }
-        } else {
-            ALOGV("Configure: Acquiring buffer from reprocess stream %d",
-                    -b.streamId);
-            ReprocessStream s = mParent->getReprocessStreamInfo(-b.streamId);
-            res = s.ops->acquire_buffer(s.ops, &(b.buffer) );
-            if (res != NO_ERROR || b.buffer == NULL) {
-                ALOGE("%s: Unable to acquire buffer from reprocess stream %d: "
-                        "%s (%d)", __FUNCTION__, -b.streamId,
-                        strerror(-res), res);
-                mParent->signalError();
-                return false;
-            }
-
-            /* Lock the buffer from the perspective of the graphics mapper */
-            res = GrallocModule::getInstance().lock(*(b.buffer),
-                    GRALLOC_USAGE_HW_CAMERA_READ,
-                    0, 0, s.width, s.height,
-                    (void**)&(b.img) );
-            if (res != NO_ERROR) {
-                ALOGE("%s: grbuffer_mapper.lock failure: %s (%d)",
-                        __FUNCTION__, strerror(-res), res);
-                s.ops->release_buffer(s.ops,
-                        b.buffer);
-                mParent->signalError();
-                return false;
-            }
-        }
+      /* Lock the buffer from the perspective of the graphics mapper */
+      res = GrallocModule::getInstance().lock(
+          *(b.buffer), GRALLOC_USAGE_HW_CAMERA_READ, 0, 0, s.width, s.height,
+          (void **)&(b.img));
+      if (res != NO_ERROR) {
+        ALOGE("%s: grbuffer_mapper.lock failure: %s (%d)", __FUNCTION__,
+              strerror(-res), res);
+        s.ops->release_buffer(s.ops, b.buffer);
+        mParent->signalError();
+        return false;
+      }
     }
-    return true;
+  }
+  return true;
 }
 
-EmulatedFakeCamera2::ReadoutThread::ReadoutThread(EmulatedFakeCamera2 *parent):
-        Thread(false),
-        mParent(parent),
-        mRunning(false),
-        mActive(false),
-        mRequestCount(0),
-        mRequest(NULL),
-        mBuffers(NULL) {
-    mInFlightQueue = new InFlightQueue[kInFlightQueueSize];
-    mInFlightHead = 0;
-    mInFlightTail = 0;
+EmulatedFakeCamera2::ReadoutThread::ReadoutThread(EmulatedFakeCamera2 *parent)
+    : Thread(false),
+      mParent(parent),
+      mRunning(false),
+      mActive(false),
+      mRequestCount(0),
+      mRequest(NULL),
+      mBuffers(NULL) {
+  mInFlightQueue = new InFlightQueue[kInFlightQueueSize];
+  mInFlightHead = 0;
+  mInFlightTail = 0;
 }
 
 EmulatedFakeCamera2::ReadoutThread::~ReadoutThread() {
-    delete[] mInFlightQueue;
+  delete[] mInFlightQueue;
 }
 
 status_t EmulatedFakeCamera2::ReadoutThread::readyToRun() {
-    Mutex::Autolock lock(mInputMutex);
-    ALOGV("Starting up ReadoutThread");
-    mRunning = true;
-    mInputSignal.signal();
-    return NO_ERROR;
+  Mutex::Autolock lock(mInputMutex);
+  ALOGV("Starting up ReadoutThread");
+  mRunning = true;
+  mInputSignal.signal();
+  return NO_ERROR;
 }
 
 status_t EmulatedFakeCamera2::ReadoutThread::waitUntilRunning() {
-    Mutex::Autolock lock(mInputMutex);
-    if (!mRunning) {
-        ALOGV("Waiting for readout thread to start");
-        mInputSignal.wait(mInputMutex);
-    }
-    return OK;
+  Mutex::Autolock lock(mInputMutex);
+  if (!mRunning) {
+    ALOGV("Waiting for readout thread to start");
+    mInputSignal.wait(mInputMutex);
+  }
+  return OK;
 }
 
 bool EmulatedFakeCamera2::ReadoutThread::waitForReady(nsecs_t timeout) {
-    status_t res;
-    Mutex::Autolock lock(mInputMutex);
-    while (!readyForNextCapture()) {
-        res = mReadySignal.waitRelative(mInputMutex, timeout);
-        if (res == TIMED_OUT) return false;
-        if (res != OK) {
-            ALOGE("%s: Error waiting for ready: %s (%d)", __FUNCTION__,
-                    strerror(-res), res);
-            return false;
-        }
+  status_t res;
+  Mutex::Autolock lock(mInputMutex);
+  while (!readyForNextCapture()) {
+    res = mReadySignal.waitRelative(mInputMutex, timeout);
+    if (res == TIMED_OUT) return false;
+    if (res != OK) {
+      ALOGE("%s: Error waiting for ready: %s (%d)", __FUNCTION__,
+            strerror(-res), res);
+      return false;
     }
-    return true;
+  }
+  return true;
 }
 
 bool EmulatedFakeCamera2::ReadoutThread::readyForNextCapture() {
-    return (mInFlightTail + 1) % kInFlightQueueSize != mInFlightHead;
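+  // head == tail means empty, so one ring-buffer slot always stays unused and
+  // the queue holds at most kInFlightQueueSize - 1 requests.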
+  return (mInFlightTail + 1) % kInFlightQueueSize != mInFlightHead;
 }
 
 void EmulatedFakeCamera2::ReadoutThread::setNextOperation(
-        bool isCapture,
-        camera_metadata_t *request,
-        Buffers *buffers) {
-    Mutex::Autolock lock(mInputMutex);
-    if ( !readyForNextCapture() ) {
-        ALOGE("In flight queue full, dropping captures");
-        mParent->signalError();
-        return;
-    }
-    mInFlightQueue[mInFlightTail].isCapture = isCapture;
-    mInFlightQueue[mInFlightTail].request = request;
-    mInFlightQueue[mInFlightTail].buffers = buffers;
-    mInFlightTail = (mInFlightTail + 1) % kInFlightQueueSize;
-    mRequestCount++;
+    bool isCapture, camera_metadata_t *request, Buffers *buffers) {
+  Mutex::Autolock lock(mInputMutex);
+  if (!readyForNextCapture()) {
+    ALOGE("In flight queue full, dropping captures");
+    mParent->signalError();
+    return;
+  }
+  mInFlightQueue[mInFlightTail].isCapture = isCapture;
+  mInFlightQueue[mInFlightTail].request = request;
+  mInFlightQueue[mInFlightTail].buffers = buffers;
+  mInFlightTail = (mInFlightTail + 1) % kInFlightQueueSize;
+  mRequestCount++;
 
-    if (!mActive) {
-        mActive = true;
-        mInputSignal.signal();
-    }
+  if (!mActive) {
+    mActive = true;
+    mInputSignal.signal();
+  }
 }
 
 bool EmulatedFakeCamera2::ReadoutThread::isStreamInUse(uint32_t id) {
-    // acquire in same order as threadLoop
-    Mutex::Autolock iLock(mInternalsMutex);
-    Mutex::Autolock lock(mInputMutex);
+  // acquire in same order as threadLoop
+  Mutex::Autolock iLock(mInternalsMutex);
+  Mutex::Autolock lock(mInputMutex);
 
-    size_t i = mInFlightHead;
-    while (i != mInFlightTail) {
-        for (size_t j = 0; j < mInFlightQueue[i].buffers->size(); j++) {
-            if ( (*(mInFlightQueue[i].buffers))[j].streamId == (int)id )
-                return true;
-        }
-        i = (i + 1) % kInFlightQueueSize;
+  size_t i = mInFlightHead;
+  while (i != mInFlightTail) {
+    for (size_t j = 0; j < mInFlightQueue[i].buffers->size(); j++) {
+      if ((*(mInFlightQueue[i].buffers))[j].streamId == (int)id) return true;
     }
+    i = (i + 1) % kInFlightQueueSize;
+  }
 
-
-    if (mBuffers != NULL) {
-        for (i = 0; i < mBuffers->size(); i++) {
-            if ( (*mBuffers)[i].streamId == (int)id) return true;
-        }
+  if (mBuffers != NULL) {
+    for (i = 0; i < mBuffers->size(); i++) {
+      if ((*mBuffers)[i].streamId == (int)id) return true;
     }
+  }
 
-    return false;
+  return false;
 }
 
 int EmulatedFakeCamera2::ReadoutThread::getInProgressCount() {
-    Mutex::Autolock lock(mInputMutex);
+  Mutex::Autolock lock(mInputMutex);
 
-    return mRequestCount;
+  return mRequestCount;
 }
 
 bool EmulatedFakeCamera2::ReadoutThread::threadLoop() {
-    static const nsecs_t kWaitPerLoop = 10000000L; // 10 ms
-    status_t res;
-    int32_t frameNumber;
+  static const nsecs_t kWaitPerLoop = 10000000L;  // 10 ms
+  status_t res;
+  int32_t frameNumber;
 
-    // Check if we're currently processing or just waiting
-    {
-        Mutex::Autolock lock(mInputMutex);
-        if (!mActive) {
-            // Inactive, keep waiting until we've been signaled
-            res = mInputSignal.waitRelative(mInputMutex, kWaitPerLoop);
-            if (res != NO_ERROR && res != TIMED_OUT) {
-                ALOGE("%s: Error waiting for capture requests: %d",
-                        __FUNCTION__, res);
-                mParent->signalError();
-                return false;
-            }
-            if (!mActive) return true;
-        }
-        // Active, see if we need a new request
-        if (mRequest == NULL) {
-            if (mInFlightHead == mInFlightTail) {
-                // Go inactive
-                ALOGV("Waiting for sensor data");
-                mActive = false;
-                return true;
-            } else {
-                Mutex::Autolock iLock(mInternalsMutex);
-                mReadySignal.signal();
-                mIsCapture = mInFlightQueue[mInFlightHead].isCapture;
-                mRequest = mInFlightQueue[mInFlightHead].request;
-                mBuffers  = mInFlightQueue[mInFlightHead].buffers;
-                mInFlightQueue[mInFlightHead].request = NULL;
-                mInFlightQueue[mInFlightHead].buffers = NULL;
-                mInFlightHead = (mInFlightHead + 1) % kInFlightQueueSize;
-                ALOGV("Ready to read out request %p, %zu buffers",
-                        mRequest, mBuffers->size());
-            }
-        }
+  // Check if we're currently processing or just waiting
+  {
+    Mutex::Autolock lock(mInputMutex);
+    if (!mActive) {
+      // Inactive, keep waiting until we've been signaled
+      res = mInputSignal.waitRelative(mInputMutex, kWaitPerLoop);
+      if (res != NO_ERROR && res != TIMED_OUT) {
+        ALOGE("%s: Error waiting for capture requests: %d", __FUNCTION__, res);
+        mParent->signalError();
+        return false;
+      }
+      if (!mActive) return true;
+    }
+    // Active, see if we need a new request
+    if (mRequest == NULL) {
+      if (mInFlightHead == mInFlightTail) {
+        // Go inactive
+        ALOGV("Waiting for sensor data");
+        mActive = false;
+        return true;
+      } else {
+        Mutex::Autolock iLock(mInternalsMutex);
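+        // Popping this entry frees a queue slot; wake any ConfigureThread
+        // blocked in waitForReady().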
+        mReadySignal.signal();
+        mIsCapture = mInFlightQueue[mInFlightHead].isCapture;
+        mRequest = mInFlightQueue[mInFlightHead].request;
+        mBuffers = mInFlightQueue[mInFlightHead].buffers;
+        mInFlightQueue[mInFlightHead].request = NULL;
+        mInFlightQueue[mInFlightHead].buffers = NULL;
+        mInFlightHead = (mInFlightHead + 1) % kInFlightQueueSize;
+        ALOGV("Ready to read out request %p, %zu buffers", mRequest,
+              mBuffers->size());
+      }
+    }
+  }
+
+  // Active with request, wait on sensor to complete
+
+  nsecs_t captureTime;
+
+  if (mIsCapture) {
+    bool gotFrame;
+    gotFrame = mParent->mSensor->waitForNewFrame(kWaitPerLoop, &captureTime);
+
+    if (!gotFrame) return true;
+  }
+
+  Mutex::Autolock iLock(mInternalsMutex);
+
+  camera_metadata_entry_t entry;
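+  // Reprocess requests carry no new sensor frame; reuse the timestamp that
+  // was recorded in the original capture's metadata instead.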
+  if (!mIsCapture) {
+    res =
+        find_camera_metadata_entry(mRequest, ANDROID_SENSOR_TIMESTAMP, &entry);
+    if (res != NO_ERROR) {
+      ALOGE("%s: error reading reprocessing timestamp: %s (%d)", __FUNCTION__,
+            strerror(-res), res);
+      mParent->signalError();
+      return false;
+    }
+    captureTime = entry.data.i64[0];
+  }
+
+  res =
+      find_camera_metadata_entry(mRequest, ANDROID_REQUEST_FRAME_COUNT, &entry);
+  if (res != NO_ERROR) {
+    ALOGE("%s: error reading frame count tag: %s (%d)", __FUNCTION__,
+          strerror(-res), res);
+    mParent->signalError();
+    return false;
+  }
+  frameNumber = *entry.data.i32;
+
+  res = find_camera_metadata_entry(mRequest, ANDROID_REQUEST_METADATA_MODE,
+                                   &entry);
+  if (res != NO_ERROR) {
+    ALOGE("%s: error reading metadata mode tag: %s (%d)", __FUNCTION__,
+          strerror(-res), res);
+    mParent->signalError();
+    return false;
+  }
+
+  // Got sensor data and request, construct frame and send it out
+  ALOGV("Readout: Constructing metadata and frames for request %d",
+        frameNumber);
+
+  if (*entry.data.u8 == ANDROID_REQUEST_METADATA_MODE_FULL) {
+    ALOGV("Readout: Metadata requested, constructing");
+
+    camera_metadata_t *frame = NULL;
+
+    size_t frame_entries = get_camera_metadata_entry_count(mRequest);
+    size_t frame_data = get_camera_metadata_data_count(mRequest);
+
+    // TODO: Dynamically calculate based on enabled statistics, etc
+    frame_entries += 10;
+    frame_data += 100;
+
+    res = mParent->mFrameQueueDst->dequeue_frame(
+        mParent->mFrameQueueDst, frame_entries, frame_data, &frame);
+
+    if (res != NO_ERROR || frame == NULL) {
+      ALOGE("%s: Unable to dequeue frame metadata buffer", __FUNCTION__);
+      mParent->signalError();
+      return false;
     }
 
-    // Active with request, wait on sensor to complete
-
-    nsecs_t captureTime;
+    res = append_camera_metadata(frame, mRequest);
+    if (res != NO_ERROR) {
+      ALOGE("Unable to append request metadata");
+    }
 
     if (mIsCapture) {
-        bool gotFrame;
-        gotFrame = mParent->mSensor->waitForNewFrame(kWaitPerLoop,
-                &captureTime);
+      add_camera_metadata_entry(frame, ANDROID_SENSOR_TIMESTAMP, &captureTime,
+                                1);
 
-        if (!gotFrame) return true;
+      collectStatisticsMetadata(frame);
+      // TODO: Collect all final values used from sensor in addition to
+      // timestamp
     }
 
-    Mutex::Autolock iLock(mInternalsMutex);
+    ALOGV("Readout: Enqueue frame %d", frameNumber);
+    mParent->mFrameQueueDst->enqueue_frame(mParent->mFrameQueueDst, frame);
+  }
+  ALOGV("Readout: Free request");
+  res = mParent->mRequestQueueSrc->free_request(mParent->mRequestQueueSrc,
+                                                mRequest);
+  if (res != NO_ERROR) {
+    ALOGE("%s: Unable to return request buffer to queue: %d", __FUNCTION__,
+          res);
+    mParent->signalError();
+    return false;
+  }
+  mRequest = NULL;
 
-    camera_metadata_entry_t entry;
-    if (!mIsCapture) {
-        res = find_camera_metadata_entry(mRequest,
-                ANDROID_SENSOR_TIMESTAMP,
-            &entry);
-        if (res != NO_ERROR) {
-            ALOGE("%s: error reading reprocessing timestamp: %s (%d)",
-                    __FUNCTION__, strerror(-res), res);
-            mParent->signalError();
-            return false;
+  int compressedBufferIndex = -1;
+  ALOGV("Readout: Processing %zu buffers", mBuffers->size());
+  for (size_t i = 0; i < mBuffers->size(); i++) {
+    const StreamBuffer &b = (*mBuffers)[i];
+    ALOGV("Readout:    Buffer %zu: Stream %d, %d x %d, format 0x%x, stride %d",
+          i, b.streamId, b.width, b.height, b.format, b.stride);
+    if (b.streamId > 0) {
+      if (b.format == HAL_PIXEL_FORMAT_BLOB) {
+        // Assumes only one BLOB buffer type per capture
+        compressedBufferIndex = i;
+      } else {
+        ALOGV("Readout:    Sending image buffer %zu (%p) to output stream %d",
+              i, (void *)*(b.buffer), b.streamId);
+        GrallocModule::getInstance().unlock(*(b.buffer));
+        const Stream &s = mParent->getStreamInfo(b.streamId);
+        res = s.ops->enqueue_buffer(s.ops, captureTime, b.buffer);
+        if (res != OK) {
+          ALOGE("Error enqueuing image buffer %p: %s (%d)", b.buffer,
+                strerror(-res), res);
+          mParent->signalError();
         }
-        captureTime = entry.data.i64[0];
+      }
     }
+  }
 
-    res = find_camera_metadata_entry(mRequest,
-            ANDROID_REQUEST_FRAME_COUNT,
-            &entry);
-    if (res != NO_ERROR) {
-        ALOGE("%s: error reading frame count tag: %s (%d)",
-                __FUNCTION__, strerror(-res), res);
-        mParent->signalError();
-        return false;
-    }
-    frameNumber = *entry.data.i32;
+  if (compressedBufferIndex == -1) {
+    delete mBuffers;
+  } else {
+    ALOGV("Readout:  Starting JPEG compression for buffer %d, stream %d",
+          compressedBufferIndex, (*mBuffers)[compressedBufferIndex].streamId);
+    mJpegTimestamp = captureTime;
+    // Takes ownership of mBuffers
+    mParent->mJpegCompressor->start(mBuffers, this);
+  }
+  mBuffers = NULL;
 
-    res = find_camera_metadata_entry(mRequest,
-            ANDROID_REQUEST_METADATA_MODE,
-            &entry);
-    if (res != NO_ERROR) {
-        ALOGE("%s: error reading metadata mode tag: %s (%d)",
-                __FUNCTION__, strerror(-res), res);
-        mParent->signalError();
-        return false;
-    }
-
-    // Got sensor data and request, construct frame and send it out
-    ALOGV("Readout: Constructing metadata and frames for request %d",
-            frameNumber);
-
-    if (*entry.data.u8 == ANDROID_REQUEST_METADATA_MODE_FULL) {
-        ALOGV("Readout: Metadata requested, constructing");
-
-        camera_metadata_t *frame = NULL;
-
-        size_t frame_entries = get_camera_metadata_entry_count(mRequest);
-        size_t frame_data    = get_camera_metadata_data_count(mRequest);
-
-        // TODO: Dynamically calculate based on enabled statistics, etc
-        frame_entries += 10;
-        frame_data += 100;
-
-        res = mParent->mFrameQueueDst->dequeue_frame(mParent->mFrameQueueDst,
-                frame_entries, frame_data, &frame);
-
-        if (res != NO_ERROR || frame == NULL) {
-            ALOGE("%s: Unable to dequeue frame metadata buffer", __FUNCTION__);
-            mParent->signalError();
-            return false;
-        }
-
-        res = append_camera_metadata(frame, mRequest);
-        if (res != NO_ERROR) {
-            ALOGE("Unable to append request metadata");
-        }
-
-        if (mIsCapture) {
-            add_camera_metadata_entry(frame,
-                    ANDROID_SENSOR_TIMESTAMP,
-                    &captureTime,
-                    1);
-
-            collectStatisticsMetadata(frame);
-            // TODO: Collect all final values used from sensor in addition to timestamp
-        }
-
-        ALOGV("Readout: Enqueue frame %d", frameNumber);
-        mParent->mFrameQueueDst->enqueue_frame(mParent->mFrameQueueDst,
-                frame);
-    }
-    ALOGV("Readout: Free request");
-    res = mParent->mRequestQueueSrc->free_request(mParent->mRequestQueueSrc, mRequest);
-    if (res != NO_ERROR) {
-        ALOGE("%s: Unable to return request buffer to queue: %d",
-                __FUNCTION__, res);
-        mParent->signalError();
-        return false;
-    }
-    mRequest = NULL;
-
-    int compressedBufferIndex = -1;
-    ALOGV("Readout: Processing %zu buffers", mBuffers->size());
-    for (size_t i = 0; i < mBuffers->size(); i++) {
-        const StreamBuffer &b = (*mBuffers)[i];
-        ALOGV("Readout:    Buffer %zu: Stream %d, %d x %d, format 0x%x, stride %d",
-                i, b.streamId, b.width, b.height, b.format, b.stride);
-        if (b.streamId > 0) {
-            if (b.format == HAL_PIXEL_FORMAT_BLOB) {
-                // Assumes only one BLOB buffer type per capture
-                compressedBufferIndex = i;
-            } else {
-                ALOGV("Readout:    Sending image buffer %zu (%p) to output stream %d",
-                        i, (void*)*(b.buffer), b.streamId);
-                GrallocModule::getInstance().unlock(*(b.buffer));
-                const Stream &s = mParent->getStreamInfo(b.streamId);
-                res = s.ops->enqueue_buffer(s.ops, captureTime, b.buffer);
-                if (res != OK) {
-                    ALOGE("Error enqueuing image buffer %p: %s (%d)", b.buffer,
-                            strerror(-res), res);
-                    mParent->signalError();
-                }
-            }
-        }
-    }
-
-    if (compressedBufferIndex == -1) {
-        delete mBuffers;
-    } else {
-        ALOGV("Readout:  Starting JPEG compression for buffer %d, stream %d",
-                compressedBufferIndex,
-                (*mBuffers)[compressedBufferIndex].streamId);
-        mJpegTimestamp = captureTime;
-        // Takes ownership of mBuffers
-        mParent->mJpegCompressor->start(mBuffers, this);
-    }
-    mBuffers = NULL;
-
-    Mutex::Autolock l(mInputMutex);
-    mRequestCount--;
-    ALOGV("Readout: Done with request %d", frameNumber);
-    return true;
+  Mutex::Autolock l(mInputMutex);
+  mRequestCount--;
+  ALOGV("Readout: Done with request %d", frameNumber);
+  return true;
 }
 
 void EmulatedFakeCamera2::ReadoutThread::onJpegDone(
-        const StreamBuffer &jpegBuffer, bool success) {
-    status_t res;
-    if (!success) {
-        ALOGE("%s: Error queueing compressed image buffer %p",
-                __FUNCTION__, jpegBuffer.buffer);
-        mParent->signalError();
-        return;
-    }
+    const StreamBuffer &jpegBuffer, bool success) {
+  status_t res;
+  if (!success) {
+    ALOGE("%s: Error compressing image buffer %p", __FUNCTION__,
+          jpegBuffer.buffer);
+    mParent->signalError();
+    return;
+  }
 
-    // Write to JPEG output stream
-    ALOGV("%s: Compression complete, pushing to stream %d", __FUNCTION__,
-            jpegBuffer.streamId);
+  // Write to JPEG output stream
+  ALOGV("%s: Compression complete, pushing to stream %d", __FUNCTION__,
+        jpegBuffer.streamId);
 
-    GrallocModule::getInstance().unlock(*(jpegBuffer.buffer));
-    const Stream &s = mParent->getStreamInfo(jpegBuffer.streamId);
-    res = s.ops->enqueue_buffer(s.ops, mJpegTimestamp, jpegBuffer.buffer);
+  GrallocModule::getInstance().unlock(*(jpegBuffer.buffer));
+  const Stream &s = mParent->getStreamInfo(jpegBuffer.streamId);
+  res = s.ops->enqueue_buffer(s.ops, mJpegTimestamp, jpegBuffer.buffer);
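+  // Note: the result of the enqueue is not checked, so a failure to push the
+  // compressed buffer downstream goes unreported here.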
 }
 
 void EmulatedFakeCamera2::ReadoutThread::onJpegInputDone(
-        const StreamBuffer &inputBuffer) {
-    status_t res;
-    GrallocModule::getInstance().unlock(*(inputBuffer.buffer));
-    const ReprocessStream &s =
-            mParent->getReprocessStreamInfo(-inputBuffer.streamId);
-    res = s.ops->release_buffer(s.ops, inputBuffer.buffer);
-    if (res != OK) {
-        ALOGE("Error releasing reprocess buffer %p: %s (%d)",
-                inputBuffer.buffer, strerror(-res), res);
-        mParent->signalError();
-    }
+    const StreamBuffer &inputBuffer) {
+  status_t res;
+  GrallocModule::getInstance().unlock(*(inputBuffer.buffer));
+  const ReprocessStream &s =
+      mParent->getReprocessStreamInfo(-inputBuffer.streamId);
+  res = s.ops->release_buffer(s.ops, inputBuffer.buffer);
+  if (res != OK) {
+    ALOGE("Error releasing reprocess buffer %p: %s (%d)", inputBuffer.buffer,
+          strerror(-res), res);
+    mParent->signalError();
+  }
 }
 
 status_t EmulatedFakeCamera2::ReadoutThread::collectStatisticsMetadata(
-        camera_metadata_t *frame) {
-    // Completely fake face rectangles, don't correspond to real faces in scene
-    ALOGV("Readout:    Collecting statistics metadata");
+    camera_metadata_t *frame) {
+  // Completely fake face rectangles, don't correspond to real faces in scene
+  ALOGV("Readout:    Collecting statistics metadata");
 
-    status_t res;
-    camera_metadata_entry_t entry;
-    res = find_camera_metadata_entry(frame,
-                ANDROID_STATISTICS_FACE_DETECT_MODE,
-                &entry);
-    if (res != OK) {
-        ALOGE("%s: Unable to find face detect mode!", __FUNCTION__);
-        return BAD_VALUE;
-    }
+  status_t res;
+  camera_metadata_entry_t entry;
+  res = find_camera_metadata_entry(frame, ANDROID_STATISTICS_FACE_DETECT_MODE,
+                                   &entry);
+  if (res != OK) {
+    ALOGE("%s: Unable to find face detect mode!", __FUNCTION__);
+    return BAD_VALUE;
+  }
 
-    if (entry.data.u8[0] == ANDROID_STATISTICS_FACE_DETECT_MODE_OFF) return OK;
+  if (entry.data.u8[0] == ANDROID_STATISTICS_FACE_DETECT_MODE_OFF) return OK;
 
-    // The coordinate system for the face regions is the raw sensor pixel
-    // coordinates. Here, we map from the scene coordinates (0-19 in both axis)
-    // to raw pixels, for the scene defined in fake-pipeline2/Scene.cpp. We
-    // approximately place two faces on top of the windows of the house. No
-    // actual faces exist there, but might one day. Note that this doesn't
-    // account for the offsets used to account for aspect ratio differences, so
-    // the rectangles don't line up quite right.
-    const size_t numFaces = 2;
-    int32_t rects[numFaces * 4] = {
-        static_cast<int32_t>(mParent->mSensorWidth * 10 / 20),
-        static_cast<int32_t>(mParent->mSensorHeight * 15 / 20),
-        static_cast<int32_t>(mParent->mSensorWidth * 12 / 20),
-        static_cast<int32_t>(mParent->mSensorHeight * 17 / 20),
+  // The coordinate system for the face regions is the raw sensor pixel
+  // coordinates. Here, we map from the scene coordinates (0-19 in both axes)
+  // to raw pixels, for the scene defined in fake-pipeline2/Scene.cpp. We
+  // approximately place two faces on top of the windows of the house. No
+  // actual faces exist there, but might one day. Note that this doesn't
+  // include the offsets applied to compensate for aspect ratio differences, so
+  // the rectangles don't line up quite right.
+  const size_t numFaces = 2;
+  int32_t rects[numFaces * 4] = {
+      static_cast<int32_t>(mParent->mSensorWidth * 10 / 20),
+      static_cast<int32_t>(mParent->mSensorHeight * 15 / 20),
+      static_cast<int32_t>(mParent->mSensorWidth * 12 / 20),
+      static_cast<int32_t>(mParent->mSensorHeight * 17 / 20),
 
-        static_cast<int32_t>(mParent->mSensorWidth * 16 / 20),
-        static_cast<int32_t>(mParent->mSensorHeight * 15 / 20),
-        static_cast<int32_t>(mParent->mSensorWidth * 18 / 20),
-        static_cast<int32_t>(mParent->mSensorHeight * 17 / 20)
-    };
-    // To simulate some kind of real detection going on, we jitter the rectangles on
-    // each frame by a few pixels in each dimension.
-    for (size_t i = 0; i < numFaces * 4; i++) {
-        rects[i] += (int32_t)(((float)rand() / RAND_MAX) * 6 - 3);
-    }
-    // The confidence scores (0-100) are similarly jittered.
-    uint8_t scores[numFaces] = { 85, 95 };
-    for (size_t i = 0; i < numFaces; i++) {
-        scores[i] += (int32_t)(((float)rand() / RAND_MAX) * 10 - 5);
-    }
+      static_cast<int32_t>(mParent->mSensorWidth * 16 / 20),
+      static_cast<int32_t>(mParent->mSensorHeight * 15 / 20),
+      static_cast<int32_t>(mParent->mSensorWidth * 18 / 20),
+      static_cast<int32_t>(mParent->mSensorHeight * 17 / 20)};
+  // To simulate some kind of real detection going on, we jitter the rectangles
+  // on each frame by a few pixels in each dimension.
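+  // rand() / RAND_MAX is uniform in [0, 1], so each offset is in [-3, +3] px.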
+  for (size_t i = 0; i < numFaces * 4; i++) {
+    rects[i] += (int32_t)(((float)rand() / RAND_MAX) * 6 - 3);
+  }
+  // The confidence scores (0-100) are similarly jittered.
+  uint8_t scores[numFaces] = {85, 95};
+  for (size_t i = 0; i < numFaces; i++) {
+    scores[i] += (int32_t)(((float)rand() / RAND_MAX) * 10 - 5);
+  }
 
-    res = add_camera_metadata_entry(frame, ANDROID_STATISTICS_FACE_RECTANGLES,
-            rects, numFaces * 4);
-    if (res != OK) {
-        ALOGE("%s: Unable to add face rectangles!", __FUNCTION__);
-        return BAD_VALUE;
-    }
+  res = add_camera_metadata_entry(frame, ANDROID_STATISTICS_FACE_RECTANGLES,
+                                  rects, numFaces * 4);
+  if (res != OK) {
+    ALOGE("%s: Unable to add face rectangles!", __FUNCTION__);
+    return BAD_VALUE;
+  }
 
-    res = add_camera_metadata_entry(frame, ANDROID_STATISTICS_FACE_SCORES,
-            scores, numFaces);
-    if (res != OK) {
-        ALOGE("%s: Unable to add face scores!", __FUNCTION__);
-        return BAD_VALUE;
-    }
+  res = add_camera_metadata_entry(frame, ANDROID_STATISTICS_FACE_SCORES, scores,
+                                  numFaces);
+  if (res != OK) {
+    ALOGE("%s: Unable to add face scores!", __FUNCTION__);
+    return BAD_VALUE;
+  }
 
-    if (entry.data.u8[0] == ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE) return OK;
+  if (entry.data.u8[0] == ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE) return OK;
 
-    // Advanced face detection options - add eye/mouth coordinates.  The
-    // coordinates in order are (leftEyeX, leftEyeY, rightEyeX, rightEyeY,
-    // mouthX, mouthY). The mapping is the same as the face rectangles.
-    int32_t features[numFaces * 6] = {
-        static_cast<int32_t>(mParent->mSensorWidth * 10.5 / 20),
-        static_cast<int32_t>(mParent->mSensorHeight * 16 / 20),
-        static_cast<int32_t>(mParent->mSensorWidth * 11.5 / 20),
-        static_cast<int32_t>(mParent->mSensorHeight * 16 / 20),
-        static_cast<int32_t>(mParent->mSensorWidth * 11 / 20),
-        static_cast<int32_t>(mParent->mSensorHeight * 16.5 / 20),
+  // Advanced face detection options - add eye/mouth coordinates.  The
+  // coordinates in order are (leftEyeX, leftEyeY, rightEyeX, rightEyeY,
+  // mouthX, mouthY). The mapping is the same as the face rectangles.
+  int32_t features[numFaces * 6] = {
+      static_cast<int32_t>(mParent->mSensorWidth * 10.5 / 20),
+      static_cast<int32_t>(mParent->mSensorHeight * 16 / 20),
+      static_cast<int32_t>(mParent->mSensorWidth * 11.5 / 20),
+      static_cast<int32_t>(mParent->mSensorHeight * 16 / 20),
+      static_cast<int32_t>(mParent->mSensorWidth * 11 / 20),
+      static_cast<int32_t>(mParent->mSensorHeight * 16.5 / 20),
 
-        static_cast<int32_t>(mParent->mSensorWidth * 16.5 / 20),
-        static_cast<int32_t>(mParent->mSensorHeight * 16 / 20),
-        static_cast<int32_t>(mParent->mSensorWidth * 17.5 / 20),
-        static_cast<int32_t>(mParent->mSensorHeight * 16 / 20),
-        static_cast<int32_t>(mParent->mSensorWidth * 17 / 20),
-        static_cast<int32_t>(mParent->mSensorHeight * 16.5 / 20),
-    };
-    // Jitter these a bit less than the rects
-    for (size_t i = 0; i < numFaces * 6; i++) {
-        features[i] += (int32_t)(((float)rand() / RAND_MAX) * 4 - 2);
-    }
-    // These are unique IDs that are used to identify each face while it's
-    // visible to the detector (if a face went away and came back, it'd get a
-    // new ID).
-    int32_t ids[numFaces] = {
-        100, 200
-    };
+      static_cast<int32_t>(mParent->mSensorWidth * 16.5 / 20),
+      static_cast<int32_t>(mParent->mSensorHeight * 16 / 20),
+      static_cast<int32_t>(mParent->mSensorWidth * 17.5 / 20),
+      static_cast<int32_t>(mParent->mSensorHeight * 16 / 20),
+      static_cast<int32_t>(mParent->mSensorWidth * 17 / 20),
+      static_cast<int32_t>(mParent->mSensorHeight * 16.5 / 20),
+  };
+  // Jitter these a bit less than the rects
+  for (size_t i = 0; i < numFaces * 6; i++) {
+    features[i] += (int32_t)(((float)rand() / RAND_MAX) * 4 - 2);
+  }
+  // These are unique IDs that are used to identify each face while it's
+  // visible to the detector (if a face went away and came back, it'd get a
+  // new ID).
+  int32_t ids[numFaces] = {100, 200};
 
-    res = add_camera_metadata_entry(frame, ANDROID_STATISTICS_FACE_LANDMARKS,
-            features, numFaces * 6);
-    if (res != OK) {
-        ALOGE("%s: Unable to add face landmarks!", __FUNCTION__);
-        return BAD_VALUE;
-    }
+  res = add_camera_metadata_entry(frame, ANDROID_STATISTICS_FACE_LANDMARKS,
+                                  features, numFaces * 6);
+  if (res != OK) {
+    ALOGE("%s: Unable to add face landmarks!", __FUNCTION__);
+    return BAD_VALUE;
+  }
 
-    res = add_camera_metadata_entry(frame, ANDROID_STATISTICS_FACE_IDS,
-            ids, numFaces);
-    if (res != OK) {
-        ALOGE("%s: Unable to add face scores!", __FUNCTION__);
-        return BAD_VALUE;
-    }
+  res = add_camera_metadata_entry(frame, ANDROID_STATISTICS_FACE_IDS, ids,
+                                  numFaces);
+  if (res != OK) {
+    ALOGE("%s: Unable to add face IDs!", __FUNCTION__);
+    return BAD_VALUE;
+  }
 
-    return OK;
+  return OK;
 }
 
-EmulatedFakeCamera2::ControlThread::ControlThread(EmulatedFakeCamera2 *parent):
-        Thread(false),
-        mParent(parent) {
-    mRunning = false;
+EmulatedFakeCamera2::ControlThread::ControlThread(EmulatedFakeCamera2 *parent)
+    : Thread(false), mParent(parent) {
+  mRunning = false;
 }
 
-EmulatedFakeCamera2::ControlThread::~ControlThread() {
-}
+EmulatedFakeCamera2::ControlThread::~ControlThread() {}
 
 status_t EmulatedFakeCamera2::ControlThread::readyToRun() {
-    Mutex::Autolock lock(mInputMutex);
+  Mutex::Autolock lock(mInputMutex);
 
-    ALOGV("Starting up ControlThread");
-    mRunning = true;
-    mStartAf = false;
-    mCancelAf = false;
-    mStartPrecapture = false;
+  ALOGV("Starting up ControlThread");
+  mRunning = true;
+  mStartAf = false;
+  mCancelAf = false;
+  mStartPrecapture = false;
 
-    mControlMode = ANDROID_CONTROL_MODE_AUTO;
+  mControlMode = ANDROID_CONTROL_MODE_AUTO;
 
-    mEffectMode = ANDROID_CONTROL_EFFECT_MODE_OFF;
-    mSceneMode = ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY;
+  mEffectMode = ANDROID_CONTROL_EFFECT_MODE_OFF;
+  mSceneMode = ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY;
 
-    mAfMode = ANDROID_CONTROL_AF_MODE_AUTO;
-    mAfModeChange = false;
+  mAfMode = ANDROID_CONTROL_AF_MODE_AUTO;
+  mAfModeChange = false;
 
-    mAeMode = ANDROID_CONTROL_AE_MODE_ON;
-    mAwbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
+  mAeMode = ANDROID_CONTROL_AE_MODE_ON;
+  mAwbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
 
-    mAfTriggerId = 0;
-    mPrecaptureTriggerId = 0;
+  mAfTriggerId = 0;
+  mPrecaptureTriggerId = 0;
 
-    mAfState = ANDROID_CONTROL_AF_STATE_INACTIVE;
-    mAeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
-    mAwbState = ANDROID_CONTROL_AWB_STATE_INACTIVE;
+  mAfState = ANDROID_CONTROL_AF_STATE_INACTIVE;
+  mAeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
+  mAwbState = ANDROID_CONTROL_AWB_STATE_INACTIVE;
 
-    mExposureTime = kNormalExposureTime;
+  mExposureTime = kNormalExposureTime;
 
-    mInputSignal.signal();
-    return NO_ERROR;
+  mInputSignal.signal();
+  return NO_ERROR;
 }
 
 status_t EmulatedFakeCamera2::ControlThread::waitUntilRunning() {
-    Mutex::Autolock lock(mInputMutex);
-    if (!mRunning) {
-        ALOGV("Waiting for control thread to start");
-        mInputSignal.wait(mInputMutex);
-    }
-    return OK;
+  Mutex::Autolock lock(mInputMutex);
+  if (!mRunning) {
+    ALOGV("Waiting for control thread to start");
+    mInputSignal.wait(mInputMutex);
+  }
+  return OK;
 }
 
-// Override android.control.* fields with 3A values before sending request to sensor
-status_t EmulatedFakeCamera2::ControlThread::processRequest(camera_metadata_t *request) {
-    Mutex::Autolock lock(mInputMutex);
-    // TODO: Add handling for all android.control.* fields here
-    camera_metadata_entry_t mode;
-    status_t res;
+// Override android.control.* fields with 3A values before sending request to
+// sensor
+status_t EmulatedFakeCamera2::ControlThread::processRequest(
+    camera_metadata_t *request) {
+  Mutex::Autolock lock(mInputMutex);
+  // TODO: Add handling for all android.control.* fields here
+  camera_metadata_entry_t mode;
+  status_t res;
 
-#define READ_IF_OK(res, what, def)                                             \
-    (((res) == OK) ? (what) : (uint8_t)(def))
+#define READ_IF_OK(res, what, def) (((res) == OK) ? (what) : (uint8_t)(def))
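+// Uses the looked-up value when the preceding find succeeded, else falls back
+// to the supplied default (cast to uint8_t).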
 
-    res = find_camera_metadata_entry(request,
-            ANDROID_CONTROL_MODE,
-            &mode);
-    mControlMode = READ_IF_OK(res, mode.data.u8[0], ANDROID_CONTROL_MODE_OFF);
+  res = find_camera_metadata_entry(request, ANDROID_CONTROL_MODE, &mode);
+  mControlMode = READ_IF_OK(res, mode.data.u8[0], ANDROID_CONTROL_MODE_OFF);
 
-    // disable all 3A
-    if (mControlMode == ANDROID_CONTROL_MODE_OFF) {
-        mEffectMode =   ANDROID_CONTROL_EFFECT_MODE_OFF;
+  // disable all 3A
+  if (mControlMode == ANDROID_CONTROL_MODE_OFF) {
+    mEffectMode = ANDROID_CONTROL_EFFECT_MODE_OFF;
 #if VSOC_PLATFORM_SDK_AFTER(K)
-        mSceneMode =    ANDROID_CONTROL_SCENE_MODE_DISABLED;
+    mSceneMode = ANDROID_CONTROL_SCENE_MODE_DISABLED;
 #else
-        mSceneMode =    ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED;
+    mSceneMode = ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED;
 #endif
-        mAfMode =       ANDROID_CONTROL_AF_MODE_OFF;
-        mAeLock =       ANDROID_CONTROL_AE_LOCK_ON;
-        mAeMode =       ANDROID_CONTROL_AE_MODE_OFF;
-        mAfModeChange = true;
-        mStartAf =      false;
-        mCancelAf =     true;
-        mAeState =      ANDROID_CONTROL_AE_STATE_INACTIVE;
-        mAwbMode =      ANDROID_CONTROL_AWB_MODE_OFF;
-        return res;
-    }
+    mAfMode = ANDROID_CONTROL_AF_MODE_OFF;
+    mAeLock = ANDROID_CONTROL_AE_LOCK_ON;
+    mAeMode = ANDROID_CONTROL_AE_MODE_OFF;
+    mAfModeChange = true;
+    mStartAf = false;
+    mCancelAf = true;
+    mAeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
+    mAwbMode = ANDROID_CONTROL_AWB_MODE_OFF;
+    return res;
+  }
 
-    res = find_camera_metadata_entry(request,
-            ANDROID_CONTROL_EFFECT_MODE,
-            &mode);
-    mEffectMode = READ_IF_OK(res, mode.data.u8[0],
-                             ANDROID_CONTROL_EFFECT_MODE_OFF);
+  res = find_camera_metadata_entry(request, ANDROID_CONTROL_EFFECT_MODE, &mode);
+  mEffectMode =
+      READ_IF_OK(res, mode.data.u8[0], ANDROID_CONTROL_EFFECT_MODE_OFF);
 
-    res = find_camera_metadata_entry(request,
-            ANDROID_CONTROL_SCENE_MODE,
-            &mode);
+  res = find_camera_metadata_entry(request, ANDROID_CONTROL_SCENE_MODE, &mode);
 #if VSOC_PLATFORM_SDK_AFTER(K)
-    mSceneMode = READ_IF_OK(res, mode.data.u8[0],
-                             ANDROID_CONTROL_SCENE_MODE_DISABLED);
+  mSceneMode =
+      READ_IF_OK(res, mode.data.u8[0], ANDROID_CONTROL_SCENE_MODE_DISABLED);
 #else
-    mSceneMode = READ_IF_OK(res, mode.data.u8[0],
-                             ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED);
+  mSceneMode =
+      READ_IF_OK(res, mode.data.u8[0], ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED);
 #endif
 
-    res = find_camera_metadata_entry(request,
-            ANDROID_CONTROL_AF_MODE,
-            &mode);
-    if (mAfMode != mode.data.u8[0]) {
-        ALOGV("AF new mode: %d, old mode %d", mode.data.u8[0], mAfMode);
-        mAfMode = mode.data.u8[0];
-        mAfModeChange = true;
-        mStartAf = false;
-        mCancelAf = false;
+  res = find_camera_metadata_entry(request, ANDROID_CONTROL_AF_MODE, &mode);
+  if (mAfMode != mode.data.u8[0]) {
+    ALOGV("AF new mode: %d, old mode %d", mode.data.u8[0], mAfMode);
+    mAfMode = mode.data.u8[0];
+    mAfModeChange = true;
+    mStartAf = false;
+    mCancelAf = false;
+  }
+
+  res = find_camera_metadata_entry(request, ANDROID_CONTROL_AE_MODE, &mode);
+  mAeMode = READ_IF_OK(res, mode.data.u8[0], ANDROID_CONTROL_AE_MODE_OFF);
+
+  res = find_camera_metadata_entry(request, ANDROID_CONTROL_AE_LOCK, &mode);
+  uint8_t aeLockVal =
+      READ_IF_OK(res, mode.data.u8[0], ANDROID_CONTROL_AE_LOCK_ON);
+  bool aeLock = (aeLockVal == ANDROID_CONTROL_AE_LOCK_ON);
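+  // Releasing the AE lock re-arms the state machine so a new scan can begin.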
+  if (mAeLock && !aeLock) {
+    mAeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
+  }
+  mAeLock = aeLock;
+
+  res = find_camera_metadata_entry(request, ANDROID_CONTROL_AWB_MODE, &mode);
+  mAwbMode = READ_IF_OK(res, mode.data.u8[0], ANDROID_CONTROL_AWB_MODE_OFF);
+
+  // TODO: Override more control fields
+
+  if (mAeMode != ANDROID_CONTROL_AE_MODE_OFF) {
+    camera_metadata_entry_t exposureTime;
+    res = find_camera_metadata_entry(request, ANDROID_SENSOR_EXPOSURE_TIME,
+                                     &exposureTime);
+    if (res == OK) {
+      exposureTime.data.i64[0] = mExposureTime;
     }
-
-    res = find_camera_metadata_entry(request,
-            ANDROID_CONTROL_AE_MODE,
-            &mode);
-    mAeMode = READ_IF_OK(res, mode.data.u8[0],
-                             ANDROID_CONTROL_AE_MODE_OFF);
-
-    res = find_camera_metadata_entry(request,
-            ANDROID_CONTROL_AE_LOCK,
-            &mode);
-    uint8_t aeLockVal = READ_IF_OK(res, mode.data.u8[0],
-                                   ANDROID_CONTROL_AE_LOCK_ON);
-    bool aeLock = (aeLockVal == ANDROID_CONTROL_AE_LOCK_ON);
-    if (mAeLock && !aeLock) {
-        mAeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
-    }
-    mAeLock = aeLock;
-
-    res = find_camera_metadata_entry(request,
-            ANDROID_CONTROL_AWB_MODE,
-            &mode);
-    mAwbMode = READ_IF_OK(res, mode.data.u8[0],
-                          ANDROID_CONTROL_AWB_MODE_OFF);
-
-    // TODO: Override more control fields
-
-    if (mAeMode != ANDROID_CONTROL_AE_MODE_OFF) {
-        camera_metadata_entry_t exposureTime;
-        res = find_camera_metadata_entry(request,
-                ANDROID_SENSOR_EXPOSURE_TIME,
-                &exposureTime);
-        if (res == OK) {
-            exposureTime.data.i64[0] = mExposureTime;
-        }
-    }
+  }
 
 #undef READ_IF_OK
 
-    return OK;
+  return OK;
 }
 
 status_t EmulatedFakeCamera2::ControlThread::triggerAction(uint32_t msgType,
-        int32_t ext1, int32_t ext2) {
-    ALOGV("%s: Triggering %d (%d, %d)", __FUNCTION__, msgType, ext1, ext2);
-    Mutex::Autolock lock(mInputMutex);
-    switch (msgType) {
-        case CAMERA2_TRIGGER_AUTOFOCUS:
-            mAfTriggerId = ext1;
-            mStartAf = true;
-            mCancelAf = false;
-            break;
-        case CAMERA2_TRIGGER_CANCEL_AUTOFOCUS:
-            mAfTriggerId = ext1;
-            mStartAf = false;
-            mCancelAf = true;
-            break;
-        case CAMERA2_TRIGGER_PRECAPTURE_METERING:
-            mPrecaptureTriggerId = ext1;
-            mStartPrecapture = true;
-            break;
-        default:
-            ALOGE("%s: Unknown action triggered: %d (arguments %d %d)",
-                    __FUNCTION__, msgType, ext1, ext2);
-            return BAD_VALUE;
-    }
-    return OK;
+                                                           int32_t ext1,
+                                                           int32_t ext2) {
+  ALOGV("%s: Triggering %d (%d, %d)", __FUNCTION__, msgType, ext1, ext2);
+  Mutex::Autolock lock(mInputMutex);
+  switch (msgType) {
+    case CAMERA2_TRIGGER_AUTOFOCUS:
+      mAfTriggerId = ext1;
+      mStartAf = true;
+      mCancelAf = false;
+      break;
+    case CAMERA2_TRIGGER_CANCEL_AUTOFOCUS:
+      mAfTriggerId = ext1;
+      mStartAf = false;
+      mCancelAf = true;
+      break;
+    case CAMERA2_TRIGGER_PRECAPTURE_METERING:
+      mPrecaptureTriggerId = ext1;
+      mStartPrecapture = true;
+      break;
+    default:
+      ALOGE("%s: Unknown action triggered: %d (arguments %d %d)", __FUNCTION__,
+            msgType, ext1, ext2);
+      return BAD_VALUE;
+  }
+  return OK;
 }
 
-const nsecs_t EmulatedFakeCamera2::ControlThread::kControlCycleDelay = 100 * MSEC;
+const nsecs_t EmulatedFakeCamera2::ControlThread::kControlCycleDelay =
+    100 * MSEC;
 const nsecs_t EmulatedFakeCamera2::ControlThread::kMinAfDuration = 500 * MSEC;
 const nsecs_t EmulatedFakeCamera2::ControlThread::kMaxAfDuration = 900 * MSEC;
 const float EmulatedFakeCamera2::ControlThread::kAfSuccessRate = 0.9;
- // Once every 5 seconds
+// Once every 5 seconds
 const float EmulatedFakeCamera2::ControlThread::kContinuousAfStartRate =
-        kControlCycleDelay / 5.0 * SEC;
+    kControlCycleDelay / (5.0 * SEC);
 const nsecs_t EmulatedFakeCamera2::ControlThread::kMinAeDuration = 500 * MSEC;
 const nsecs_t EmulatedFakeCamera2::ControlThread::kMaxAeDuration = 2 * SEC;
-const nsecs_t EmulatedFakeCamera2::ControlThread::kMinPrecaptureAeDuration = 100 * MSEC;
-const nsecs_t EmulatedFakeCamera2::ControlThread::kMaxPrecaptureAeDuration = 400 * MSEC;
- // Once every 3 seconds
+const nsecs_t EmulatedFakeCamera2::ControlThread::kMinPrecaptureAeDuration =
+    100 * MSEC;
+const nsecs_t EmulatedFakeCamera2::ControlThread::kMaxPrecaptureAeDuration =
+    400 * MSEC;
+// Once every 3 seconds
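+// (start probability per control cycle = kControlCycleDelay / 3 s, in ns)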
 const float EmulatedFakeCamera2::ControlThread::kAeScanStartRate =
     kControlCycleDelay / 3000000000.0;
 
-const nsecs_t EmulatedFakeCamera2::ControlThread::kNormalExposureTime = 10 * MSEC;
+const nsecs_t EmulatedFakeCamera2::ControlThread::kNormalExposureTime =
+    10 * MSEC;
 const nsecs_t EmulatedFakeCamera2::ControlThread::kExposureJump = 2 * MSEC;
 const nsecs_t EmulatedFakeCamera2::ControlThread::kMinExposureTime = 1 * MSEC;
 
 bool EmulatedFakeCamera2::ControlThread::threadLoop() {
-    bool afModeChange = false;
-    bool afTriggered = false;
-    bool afCancelled = false;
-    uint8_t afState;
-    uint8_t afMode;
-    int32_t afTriggerId;
-    bool precaptureTriggered = false;
-    uint8_t aeState;
-    uint8_t aeMode;
-    bool    aeLock;
-    int32_t precaptureTriggerId;
-    nsecs_t nextSleep = kControlCycleDelay;
+  bool afModeChange = false;
+  bool afTriggered = false;
+  bool afCancelled = false;
+  uint8_t afState;
+  uint8_t afMode;
+  int32_t afTriggerId;
+  bool precaptureTriggered = false;
+  uint8_t aeState;
+  uint8_t aeMode;
+  bool aeLock;
+  int32_t precaptureTriggerId;
+  nsecs_t nextSleep = kControlCycleDelay;
 
-    {
-        Mutex::Autolock lock(mInputMutex);
-        if (mStartAf) {
-            ALOGD("Starting AF trigger processing");
-            afTriggered = true;
-            mStartAf = false;
-        } else if (mCancelAf) {
-            ALOGD("Starting cancel AF trigger processing");
-            afCancelled = true;
-            mCancelAf = false;
-        }
-        afState = mAfState;
-        afMode = mAfMode;
-        afModeChange = mAfModeChange;
-        mAfModeChange = false;
-
-        afTriggerId = mAfTriggerId;
-
-        if(mStartPrecapture) {
-            ALOGD("Starting precapture trigger processing");
-            precaptureTriggered = true;
-            mStartPrecapture = false;
-        }
-        aeState = mAeState;
-        aeMode = mAeMode;
-        aeLock = mAeLock;
-        precaptureTriggerId = mPrecaptureTriggerId;
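+  // Snapshot the trigger and mode inputs under the lock; the 3A state
+  // machines below then run without holding it.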
+  {
+    Mutex::Autolock lock(mInputMutex);
+    if (mStartAf) {
+      ALOGD("Starting AF trigger processing");
+      afTriggered = true;
+      mStartAf = false;
+    } else if (mCancelAf) {
+      ALOGD("Starting cancel AF trigger processing");
+      afCancelled = true;
+      mCancelAf = false;
     }
+    afState = mAfState;
+    afMode = mAfMode;
+    afModeChange = mAfModeChange;
+    mAfModeChange = false;
 
-    if (afCancelled || afModeChange) {
-        ALOGV("Resetting AF state due to cancel/mode change");
-        afState = ANDROID_CONTROL_AF_STATE_INACTIVE;
-        updateAfState(afState, afTriggerId);
-        mAfScanDuration = 0;
-        mLockAfterPassiveScan = false;
+    afTriggerId = mAfTriggerId;
+
+    if (mStartPrecapture) {
+      ALOGD("Starting precapture trigger processing");
+      precaptureTriggered = true;
+      mStartPrecapture = false;
     }
+    aeState = mAeState;
+    aeMode = mAeMode;
+    aeLock = mAeLock;
+    precaptureTriggerId = mPrecaptureTriggerId;
+  }
 
-    uint8_t oldAfState = afState;
-
-    if (afTriggered) {
-        afState = processAfTrigger(afMode, afState);
-    }
-
-    afState = maybeStartAfScan(afMode, afState);
-    afState = updateAfScan(afMode, afState, &nextSleep);
+  if (afCancelled || afModeChange) {
+    ALOGV("Resetting AF state due to cancel/mode change");
+    afState = ANDROID_CONTROL_AF_STATE_INACTIVE;
     updateAfState(afState, afTriggerId);
+    mAfScanDuration = 0;
+    mLockAfterPassiveScan = false;
+  }
 
-    if (precaptureTriggered) {
-        aeState = processPrecaptureTrigger(aeMode, aeState);
-    }
+  uint8_t oldAfState = afState;
 
-    aeState = maybeStartAeScan(aeMode, aeLock, aeState);
-    aeState = updateAeScan(aeMode, aeLock, aeState, &nextSleep);
-    updateAeState(aeState, precaptureTriggerId);
+  if (afTriggered) {
+    afState = processAfTrigger(afMode, afState);
+  }
 
-    int ret;
-    timespec t;
-    t.tv_sec = 0;
-    t.tv_nsec = nextSleep;
-    do {
-        ret = nanosleep(&t, &t);
-    } while (ret != 0);
+  afState = maybeStartAfScan(afMode, afState);
+  afState = updateAfScan(afMode, afState, &nextSleep);
+  updateAfState(afState, afTriggerId);
 
-    if (mAfScanDuration > 0) {
-        mAfScanDuration -= nextSleep;
-    }
-    if (mAeScanDuration > 0) {
-        mAeScanDuration -= nextSleep;
-    }
+  if (precaptureTriggered) {
+    aeState = processPrecaptureTrigger(aeMode, aeState);
+  }
 
-    return true;
+  aeState = maybeStartAeScan(aeMode, aeLock, aeState);
+  aeState = updateAeScan(aeMode, aeLock, aeState, &nextSleep);
+  updateAeState(aeState, precaptureTriggerId);
+
+  int ret;
+  timespec t;
+  t.tv_sec = 0;
+  t.tv_nsec = nextSleep;
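+  // nanosleep() writes the unslept remainder back into 't', so retry until
+  // the full delay has elapsed (e.g. after an EINTR interruption).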
+  do {
+    ret = nanosleep(&t, &t);
+  } while (ret != 0);
+
+  if (mAfScanDuration > 0) {
+    mAfScanDuration -= nextSleep;
+  }
+  if (mAeScanDuration > 0) {
+    mAeScanDuration -= nextSleep;
+  }
+
+  return true;
 }
 
 int EmulatedFakeCamera2::ControlThread::processAfTrigger(uint8_t afMode,
-        uint8_t afState) {
-    switch (afMode) {
-        case ANDROID_CONTROL_AF_MODE_OFF:
-        case ANDROID_CONTROL_AF_MODE_EDOF:
-            // Do nothing
-            break;
-        case ANDROID_CONTROL_AF_MODE_MACRO:
-        case ANDROID_CONTROL_AF_MODE_AUTO:
-            switch (afState) {
-                case ANDROID_CONTROL_AF_STATE_INACTIVE:
-                case ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED:
-                case ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED:
-                    // Start new focusing cycle
-                    mAfScanDuration =  ((double)rand() / RAND_MAX) *
-                        (kMaxAfDuration - kMinAfDuration) + kMinAfDuration;
-                    afState = ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN;
-                    ALOGV("%s: AF scan start, duration %" PRId64 " ms",
-                          __FUNCTION__, mAfScanDuration / 1000000);
-                    break;
-                case ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN:
-                    // Ignore new request, already scanning
-                    break;
-                default:
-                    ALOGE("Unexpected AF state in AUTO/MACRO AF mode: %d",
-                          afState);
-            }
-            break;
-        case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
-            switch (afState) {
-                // Picture mode waits for passive scan to complete
-                case ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN:
-                    mLockAfterPassiveScan = true;
-                    break;
-                case ANDROID_CONTROL_AF_STATE_INACTIVE:
-                    afState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
-                    break;
-                case ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED:
-                    afState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
-                    break;
-                case ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED:
-                case ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED:
-                    // Must cancel to get out of these states
-                    break;
-                default:
-                    ALOGE("Unexpected AF state in CONTINUOUS_PICTURE AF mode: %d",
-                          afState);
-            }
-            break;
-        case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
-            switch (afState) {
-                // Video mode does not wait for passive scan to complete
-                case ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN:
-                case ANDROID_CONTROL_AF_STATE_INACTIVE:
-                    afState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
-                    break;
-                case ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED:
-                    afState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
-                    break;
-                case ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED:
-                case ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED:
-                    // Must cancel to get out of these states
-                    break;
-                default:
-                    ALOGE("Unexpected AF state in CONTINUOUS_VIDEO AF mode: %d",
-                          afState);
-            }
-            break;
+                                                         uint8_t afState) {
+  switch (afMode) {
+    case ANDROID_CONTROL_AF_MODE_OFF:
+    case ANDROID_CONTROL_AF_MODE_EDOF:
+      // Do nothing
+      break;
+    case ANDROID_CONTROL_AF_MODE_MACRO:
+    case ANDROID_CONTROL_AF_MODE_AUTO:
+      switch (afState) {
+        case ANDROID_CONTROL_AF_STATE_INACTIVE:
+        case ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED:
+        case ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED:
+          // Start new focusing cycle
+          mAfScanDuration =
+              ((double)rand() / RAND_MAX) * (kMaxAfDuration - kMinAfDuration) +
+              kMinAfDuration;
+          afState = ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN;
+          ALOGV("%s: AF scan start, duration %" PRId64 " ms", __FUNCTION__,
+                mAfScanDuration / 1000000);
+          break;
+        case ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN:
+          // Ignore new request, already scanning
+          break;
         default:
-            break;
-    }
-    return afState;
+          ALOGE("Unexpected AF state in AUTO/MACRO AF mode: %d", afState);
+      }
+      break;
+    case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+      switch (afState) {
+        // Picture mode waits for passive scan to complete
+        case ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN:
+          mLockAfterPassiveScan = true;
+          break;
+        case ANDROID_CONTROL_AF_STATE_INACTIVE:
+          afState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
+          break;
+        case ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED:
+          afState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
+          break;
+        case ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED:
+        case ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED:
+          // Must cancel to get out of these states
+          break;
+        default:
+          ALOGE("Unexpected AF state in CONTINUOUS_PICTURE AF mode: %d",
+                afState);
+      }
+      break;
+    case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+      switch (afState) {
+        // Video mode does not wait for passive scan to complete
+        case ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN:
+        case ANDROID_CONTROL_AF_STATE_INACTIVE:
+          afState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
+          break;
+        case ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED:
+          afState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
+          break;
+        case ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED:
+        case ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED:
+          // Must cancel to get out of these states
+          break;
+        default:
+          ALOGE("Unexpected AF state in CONTINUOUS_VIDEO AF mode: %d", afState);
+      }
+      break;
+    default:
+      break;
+  }
+  return afState;
 }
 
 int EmulatedFakeCamera2::ControlThread::maybeStartAfScan(uint8_t afMode,
-        uint8_t afState) {
-    if ((afMode == ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO ||
-            afMode == ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE) &&
-        (afState == ANDROID_CONTROL_AF_STATE_INACTIVE ||
-            afState == ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED)) {
-
-        bool startScan = ((double)rand() / RAND_MAX) < kContinuousAfStartRate;
-        if (startScan) {
-            // Start new passive focusing cycle
-            mAfScanDuration =  ((double)rand() / RAND_MAX) *
-                (kMaxAfDuration - kMinAfDuration) + kMinAfDuration;
-            afState = ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN;
-            ALOGV("%s: AF passive scan start, duration %" PRId64 " ms",
-                __FUNCTION__, mAfScanDuration / 1000000);
-        }
+                                                         uint8_t afState) {
+  if ((afMode == ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO ||
+       afMode == ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE) &&
+      (afState == ANDROID_CONTROL_AF_STATE_INACTIVE ||
+       afState == ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED)) {
+    bool startScan = ((double)rand() / RAND_MAX) < kContinuousAfStartRate;
+    if (startScan) {
+      // Start new passive focusing cycle
+      mAfScanDuration =
+          ((double)rand() / RAND_MAX) * (kMaxAfDuration - kMinAfDuration) +
+          kMinAfDuration;
+      afState = ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN;
+      ALOGV("%s: AF passive scan start, duration %" PRId64 " ms", __FUNCTION__,
+            mAfScanDuration / 1000000);
     }
-    return afState;
+  }
+  return afState;
 }
 
 int EmulatedFakeCamera2::ControlThread::updateAfScan(uint8_t afMode,
-        uint8_t afState, nsecs_t *maxSleep) {
-    if (! (afState == ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN ||
-            afState == ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN ) ) {
-        return afState;
-    }
-
-    if (mAfScanDuration <= 0) {
-        ALOGV("%s: AF scan done", __FUNCTION__);
-        switch (afMode) {
-            case ANDROID_CONTROL_AF_MODE_MACRO:
-            case ANDROID_CONTROL_AF_MODE_AUTO: {
-                bool success = ((double)rand() / RAND_MAX) < kAfSuccessRate;
-                if (success) {
-                    afState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
-                } else {
-                    afState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
-                }
-                break;
-            }
-            case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
-                if (mLockAfterPassiveScan) {
-                    afState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
-                    mLockAfterPassiveScan = false;
-                } else {
-                    afState = ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED;
-                }
-                break;
-            case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
-                afState = ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED;
-                break;
-            default:
-                ALOGE("Unexpected AF mode in scan state");
-        }
-    } else {
-        if (mAfScanDuration <= *maxSleep) {
-            *maxSleep = mAfScanDuration;
-        }
-    }
+                                                     uint8_t afState,
+                                                     nsecs_t *maxSleep) {
+  if (!(afState == ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN ||
+        afState == ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN)) {
     return afState;
+  }
+
+  if (mAfScanDuration <= 0) {
+    ALOGV("%s: AF scan done", __FUNCTION__);
+    switch (afMode) {
+      case ANDROID_CONTROL_AF_MODE_MACRO:
+      case ANDROID_CONTROL_AF_MODE_AUTO: {
+        bool success = ((double)rand() / RAND_MAX) < kAfSuccessRate;
+        if (success) {
+          afState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
+        } else {
+          afState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
+        }
+        break;
+      }
+      case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+        if (mLockAfterPassiveScan) {
+          afState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
+          mLockAfterPassiveScan = false;
+        } else {
+          afState = ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED;
+        }
+        break;
+      case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+        afState = ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED;
+        break;
+      default:
+        ALOGE("Unexpected AF mode in scan state");
+    }
+  } else {
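+    // Scan still in progress: wake up no later than its scheduled completion.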
+    if (mAfScanDuration <= *maxSleep) {
+      *maxSleep = mAfScanDuration;
+    }
+  }
+  return afState;
 }
 
 void EmulatedFakeCamera2::ControlThread::updateAfState(uint8_t newState,
-        int32_t triggerId) {
-    Mutex::Autolock lock(mInputMutex);
-    if (mAfState != newState) {
-        ALOGV("%s: Autofocus state now %d, id %d", __FUNCTION__,
-                newState, triggerId);
-        mAfState = newState;
-        mParent->sendNotification(CAMERA2_MSG_AUTOFOCUS,
-                newState, triggerId, 0);
-    }
+                                                       int32_t triggerId) {
+  Mutex::Autolock lock(mInputMutex);
+  if (mAfState != newState) {
+    ALOGV("%s: Autofocus state now %d, id %d", __FUNCTION__, newState,
+          triggerId);
+    mAfState = newState;
+    mParent->sendNotification(CAMERA2_MSG_AUTOFOCUS, newState, triggerId, 0);
+  }
 }
 
-int EmulatedFakeCamera2::ControlThread::processPrecaptureTrigger(uint8_t aeMode,
-        uint8_t aeState) {
-    switch (aeMode) {
-        case ANDROID_CONTROL_AE_MODE_OFF:
-            // Don't do anything for these
-            return aeState;
-        case ANDROID_CONTROL_AE_MODE_ON:
-        case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH:
-        case ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH:
-        case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE:
-            // Trigger a precapture cycle
-            aeState = ANDROID_CONTROL_AE_STATE_PRECAPTURE;
-            mAeScanDuration = ((double)rand() / RAND_MAX) *
-                    (kMaxPrecaptureAeDuration - kMinPrecaptureAeDuration) +
-                    kMinPrecaptureAeDuration;
-            ALOGD("%s: AE precapture scan start, duration %" PRId64 " ms",
-                    __FUNCTION__, mAeScanDuration / 1000000);
-
-    }
-    return aeState;
+int EmulatedFakeCamera2::ControlThread::processPrecaptureTrigger(
+    uint8_t aeMode, uint8_t aeState) {
+  switch (aeMode) {
+    case ANDROID_CONTROL_AE_MODE_OFF:
+      // Nothing to do when AE is off
+      return aeState;
+    case ANDROID_CONTROL_AE_MODE_ON:
+    case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH:
+    case ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH:
+    case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE:
+      // Trigger a precapture cycle
+      aeState = ANDROID_CONTROL_AE_STATE_PRECAPTURE;
+      mAeScanDuration =
+          ((double)rand() / RAND_MAX) *
+              (kMaxPrecaptureAeDuration - kMinPrecaptureAeDuration) +
+          kMinPrecaptureAeDuration;
+      ALOGD("%s: AE precapture scan start, duration %" PRId64 " ms",
+            __FUNCTION__, mAeScanDuration / 1000000);
+  }
+  return aeState;
 }
 
 int EmulatedFakeCamera2::ControlThread::maybeStartAeScan(uint8_t aeMode,
-        bool aeLocked,
-        uint8_t aeState) {
-    if (aeLocked) return aeState;
-    switch (aeMode) {
-        case ANDROID_CONTROL_AE_MODE_OFF:
-            break;
-        case ANDROID_CONTROL_AE_MODE_ON:
-        case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH:
-        case ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH:
-        case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE: {
-            if (aeState != ANDROID_CONTROL_AE_STATE_INACTIVE &&
-                    aeState != ANDROID_CONTROL_AE_STATE_CONVERGED) break;
+                                                         bool aeLocked,
+                                                         uint8_t aeState) {
+  if (aeLocked) return aeState;
+  switch (aeMode) {
+    case ANDROID_CONTROL_AE_MODE_OFF:
+      break;
+    case ANDROID_CONTROL_AE_MODE_ON:
+    case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH:
+    case ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH:
+    case ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE: {
+      if (aeState != ANDROID_CONTROL_AE_STATE_INACTIVE &&
+          aeState != ANDROID_CONTROL_AE_STATE_CONVERGED)
+        break;
 
-            bool startScan = ((double)rand() / RAND_MAX) < kAeScanStartRate;
-            if (startScan) {
-                mAeScanDuration = ((double)rand() / RAND_MAX) *
-                (kMaxAeDuration - kMinAeDuration) + kMinAeDuration;
-                aeState = ANDROID_CONTROL_AE_STATE_SEARCHING;
-                ALOGV("%s: AE scan start, duration %" PRId64 " ms",
-                        __FUNCTION__, mAeScanDuration / 1000000);
-            }
-        }
+      bool startScan = ((double)rand() / RAND_MAX) < kAeScanStartRate;
+      if (startScan) {
+        mAeScanDuration =
+            ((double)rand() / RAND_MAX) * (kMaxAeDuration - kMinAeDuration) +
+            kMinAeDuration;
+        aeState = ANDROID_CONTROL_AE_STATE_SEARCHING;
+        ALOGV("%s: AE scan start, duration %" PRId64 " ms", __FUNCTION__,
+              mAeScanDuration / 1000000);
+      }
     }
+  }
 
-    return aeState;
+  return aeState;
 }
 
 int EmulatedFakeCamera2::ControlThread::updateAeScan(uint8_t aeMode,
-        bool aeLock, uint8_t aeState, nsecs_t *maxSleep) {
-    if (aeLock && aeState != ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
-        mAeScanDuration = 0;
-        aeState = ANDROID_CONTROL_AE_STATE_LOCKED;
-    } else if ((aeState == ANDROID_CONTROL_AE_STATE_SEARCHING) ||
-            (aeState == ANDROID_CONTROL_AE_STATE_PRECAPTURE ) ) {
-        if (mAeScanDuration <= 0) {
-            ALOGV("%s: AE scan done", __FUNCTION__);
-            aeState = aeLock ?
-                    ANDROID_CONTROL_AE_STATE_LOCKED :ANDROID_CONTROL_AE_STATE_CONVERGED;
+                                                     bool aeLock,
+                                                     uint8_t aeState,
+                                                     nsecs_t *maxSleep) {
+  if (aeLock && aeState != ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
+    mAeScanDuration = 0;
+    aeState = ANDROID_CONTROL_AE_STATE_LOCKED;
+  } else if ((aeState == ANDROID_CONTROL_AE_STATE_SEARCHING) ||
+             (aeState == ANDROID_CONTROL_AE_STATE_PRECAPTURE)) {
+    if (mAeScanDuration <= 0) {
+      ALOGV("%s: AE scan done", __FUNCTION__);
+      aeState = aeLock ? ANDROID_CONTROL_AE_STATE_LOCKED
+                       : ANDROID_CONTROL_AE_STATE_CONVERGED;
 
-            Mutex::Autolock lock(mInputMutex);
-            mExposureTime = kNormalExposureTime;
-        } else {
-            if (mAeScanDuration <= *maxSleep) {
-                *maxSleep = mAeScanDuration;
-            }
+      Mutex::Autolock lock(mInputMutex);
+      mExposureTime = kNormalExposureTime;
+    } else {
+      if (mAeScanDuration <= *maxSleep) {
+        *maxSleep = mAeScanDuration;
+      }
 
-            int64_t exposureDelta =
-                    ((double)rand() / RAND_MAX) * 2 * kExposureJump -
-                    kExposureJump;
-            Mutex::Autolock lock(mInputMutex);
-            mExposureTime = mExposureTime + exposureDelta;
-            if (mExposureTime < kMinExposureTime) mExposureTime = kMinExposureTime;
-        }
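+      // Jitter exposure uniformly within [-kExposureJump, +kExposureJump]
+      // while the scan runs.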
+      int64_t exposureDelta =
+          ((double)rand() / RAND_MAX) * 2 * kExposureJump - kExposureJump;
+      Mutex::Autolock lock(mInputMutex);
+      mExposureTime = mExposureTime + exposureDelta;
+      if (mExposureTime < kMinExposureTime) mExposureTime = kMinExposureTime;
     }
+  }
 
-    return aeState;
+  return aeState;
 }
 
-
 void EmulatedFakeCamera2::ControlThread::updateAeState(uint8_t newState,
-        int32_t triggerId) {
-    Mutex::Autolock lock(mInputMutex);
-    if (mAeState != newState) {
-        ALOGV("%s: Autoexposure state now %d, id %d", __FUNCTION__,
-                newState, triggerId);
-        mAeState = newState;
-        mParent->sendNotification(CAMERA2_MSG_AUTOEXPOSURE,
-                newState, triggerId, 0);
-    }
+                                                       int32_t triggerId) {
+  Mutex::Autolock lock(mInputMutex);
+  if (mAeState != newState) {
+    ALOGV("%s: Autoexposure state now %d, id %d", __FUNCTION__, newState,
+          triggerId);
+    mAeState = newState;
+    mParent->sendNotification(CAMERA2_MSG_AUTOEXPOSURE, newState, triggerId, 0);
+  }
 }
 
 /** Private methods */
 
-status_t EmulatedFakeCamera2::constructStaticInfo(
-        camera_metadata_t **info,
-        bool sizeRequest) const {
+status_t EmulatedFakeCamera2::constructStaticInfo(camera_metadata_t **info,
+                                                  bool sizeRequest) const {
+  size_t entryCount = 0;
+  size_t dataCount = 0;
+  status_t ret;
 
-    size_t entryCount = 0;
-    size_t dataCount = 0;
-    status_t ret;
+#define ADD_OR_SIZE(tag, data, count)                                          \
+  if ((ret = addOrSize(*info, sizeRequest, &entryCount, &dataCount, tag, data, \
+                       count)) != OK)                                          \
+  return ret
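+// addOrSize() either tallies entry/data counts (when sizeRequest is set) or
+// appends the entry, so this routine is presumably called twice: once to size
+// the buffer, once to fill it.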
 
-#define ADD_OR_SIZE( tag, data, count ) \
-    if ( ( ret = addOrSize(*info, sizeRequest, &entryCount, &dataCount, \
-            tag, data, count) ) != OK ) return ret
+  // android.lens
 
-    // android.lens
+  // 5 cm min focus distance for back camera, infinity (fixed focus) for front
+  const float minFocusDistance = mFacingBack ? 1.0 / 0.05 : 0.0;
+  ADD_OR_SIZE(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE, &minFocusDistance, 1);
+  // 5 m hyperfocal distance for back camera, infinity (fixed focus) for front
+  const float hyperFocalDistance = mFacingBack ? 1.0 / 5.0 : 0.0;
+  ADD_OR_SIZE(ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE, &hyperFocalDistance, 1);
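+  // (Lens distances above are in diopters, i.e. 1/meters; 0.0 denotes fixed
+  // focus at infinity.)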
 
-    // 5 cm min focus distance for back camera, infinity (fixed focus) for front
-    const float minFocusDistance = mFacingBack ? 1.0/0.05 : 0.0;
-    ADD_OR_SIZE(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE,
-            &minFocusDistance, 1);
-    // 5 m hyperfocal distance for back camera, infinity (fixed focus) for front
-    const float hyperFocalDistance = mFacingBack ? 1.0/5.0 : 0.0;
-    ADD_OR_SIZE(ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE,
-            &minFocusDistance, 1);
+  static const float focalLength = 3.30f;  // mm
+  ADD_OR_SIZE(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, &focalLength, 1);
+  static const float aperture = 2.8f;
+  ADD_OR_SIZE(ANDROID_LENS_INFO_AVAILABLE_APERTURES, &aperture, 1);
+  static const float filterDensity = 0;
+  ADD_OR_SIZE(ANDROID_LENS_INFO_AVAILABLE_FILTER_DENSITIES, &filterDensity, 1);
+  static const uint8_t availableOpticalStabilization =
+      ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+  ADD_OR_SIZE(ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
+              &availableOpticalStabilization, 1);
 
-    static const float focalLength = 3.30f; // mm
-    ADD_OR_SIZE(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
-            &focalLength, 1);
-    static const float aperture = 2.8f;
-    ADD_OR_SIZE(ANDROID_LENS_INFO_AVAILABLE_APERTURES,
-            &aperture, 1);
-    static const float filterDensity = 0;
-    ADD_OR_SIZE(ANDROID_LENS_INFO_AVAILABLE_FILTER_DENSITIES,
-            &filterDensity, 1);
-    static const uint8_t availableOpticalStabilization =
-            ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
-    ADD_OR_SIZE(ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
-            &availableOpticalStabilization, 1);
+  static const int32_t lensShadingMapSize[] = {1, 1};
+  ADD_OR_SIZE(ANDROID_LENS_INFO_SHADING_MAP_SIZE, lensShadingMapSize,
+              sizeof(lensShadingMapSize) / sizeof(int32_t));
 
-    static const int32_t lensShadingMapSize[] = {1, 1};
-    ADD_OR_SIZE(ANDROID_LENS_INFO_SHADING_MAP_SIZE, lensShadingMapSize,
-            sizeof(lensShadingMapSize)/sizeof(int32_t));
+  int32_t lensFacing =
+      mFacingBack ? ANDROID_LENS_FACING_BACK : ANDROID_LENS_FACING_FRONT;
+  ADD_OR_SIZE(ANDROID_LENS_FACING, &lensFacing, 1);
 
-    int32_t lensFacing = mFacingBack ?
-            ANDROID_LENS_FACING_BACK : ANDROID_LENS_FACING_FRONT;
-    ADD_OR_SIZE(ANDROID_LENS_FACING, &lensFacing, 1);
+  // android.sensor
 
-    // android.sensor
+  ADD_OR_SIZE(ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE,
+              Sensor::kExposureTimeRange, 2);
 
-    ADD_OR_SIZE(ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE,
-            Sensor::kExposureTimeRange, 2);
+  ADD_OR_SIZE(ANDROID_SENSOR_INFO_MAX_FRAME_DURATION,
+              &Sensor::kFrameDurationRange[1], 1);
 
-    ADD_OR_SIZE(ANDROID_SENSOR_INFO_MAX_FRAME_DURATION,
-            &Sensor::kFrameDurationRange[1], 1);
+  ADD_OR_SIZE(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE, Sensor::kSensitivityRange,
+              sizeof(Sensor::kSensitivityRange) / sizeof(int32_t));
 
-    ADD_OR_SIZE(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE,
-            Sensor::kSensitivityRange,
-            sizeof(Sensor::kSensitivityRange)
-            /sizeof(int32_t));
+  ADD_OR_SIZE(ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
+              &Sensor::kColorFilterArrangement, 1);
 
-    ADD_OR_SIZE(ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
-            &Sensor::kColorFilterArrangement, 1);
+  static const float sensorPhysicalSize[2] = {3.20f, 2.40f};  // mm
+  ADD_OR_SIZE(ANDROID_SENSOR_INFO_PHYSICAL_SIZE, sensorPhysicalSize, 2);
 
-    static const float sensorPhysicalSize[2] = {3.20f, 2.40f}; // mm
-    ADD_OR_SIZE(ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
-            sensorPhysicalSize, 2);
+  const int32_t pixelArray[] = {mSensorWidth, mSensorHeight};
+  ADD_OR_SIZE(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE, pixelArray, 2);
 
-    const int32_t pixelArray[] = {mSensorWidth, mSensorHeight};
-    ADD_OR_SIZE(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
-            pixelArray, 2);
+  ADD_OR_SIZE(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, pixelArray, 2);
 
-    ADD_OR_SIZE(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
-            pixelArray, 2);
+  ADD_OR_SIZE(ANDROID_SENSOR_INFO_WHITE_LEVEL, &Sensor::kMaxRawValue, 1);
 
-    ADD_OR_SIZE(ANDROID_SENSOR_INFO_WHITE_LEVEL,
-            &Sensor::kMaxRawValue, 1);
+  static const int32_t blackLevelPattern[4] = {
+      static_cast<int32_t>(Sensor::kBlackLevel),
+      static_cast<int32_t>(Sensor::kBlackLevel),
+      static_cast<int32_t>(Sensor::kBlackLevel),
+      static_cast<int32_t>(Sensor::kBlackLevel)};
+  ADD_OR_SIZE(ANDROID_SENSOR_BLACK_LEVEL_PATTERN, blackLevelPattern,
+              sizeof(blackLevelPattern) / sizeof(int32_t));
 
-    static const int32_t blackLevelPattern[4] = {
-        static_cast<int32_t>(Sensor::kBlackLevel),
-        static_cast<int32_t>(Sensor::kBlackLevel),
-        static_cast<int32_t>(Sensor::kBlackLevel),
-        static_cast<int32_t>(Sensor::kBlackLevel)
-    };
-    ADD_OR_SIZE(ANDROID_SENSOR_BLACK_LEVEL_PATTERN,
-            blackLevelPattern, sizeof(blackLevelPattern)/sizeof(int32_t));
+  // TODO: sensor color calibration fields
 
-    //TODO: sensor color calibration fields
+  // android.flash
+  static const uint8_t flashAvailable = 0;
+  ADD_OR_SIZE(ANDROID_FLASH_INFO_AVAILABLE, &flashAvailable, 1);
 
-    // android.flash
-    static const uint8_t flashAvailable = 0;
-    ADD_OR_SIZE(ANDROID_FLASH_INFO_AVAILABLE, &flashAvailable, 1);
+  static const int64_t flashChargeDuration = 0;
+  ADD_OR_SIZE(ANDROID_FLASH_INFO_CHARGE_DURATION, &flashChargeDuration, 1);
 
-    static const int64_t flashChargeDuration = 0;
-    ADD_OR_SIZE(ANDROID_FLASH_INFO_CHARGE_DURATION, &flashChargeDuration, 1);
+  // android.tonemap
 
-    // android.tonemap
+  static const int32_t tonemapCurvePoints = 128;
+  ADD_OR_SIZE(ANDROID_TONEMAP_MAX_CURVE_POINTS, &tonemapCurvePoints, 1);
 
-    static const int32_t tonemapCurvePoints = 128;
-    ADD_OR_SIZE(ANDROID_TONEMAP_MAX_CURVE_POINTS, &tonemapCurvePoints, 1);
+  // android.scaler
 
-    // android.scaler
+  ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_FORMATS, kAvailableFormats,
+              sizeof(kAvailableFormats) / sizeof(uint32_t));
 
-    ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_FORMATS,
-            kAvailableFormats,
-            sizeof(kAvailableFormats)/sizeof(uint32_t));
+  ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_RAW_SIZES, &mAvailableRawSizes.front(),
+              mAvailableRawSizes.size());
 
-    ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_RAW_SIZES,
-            &mAvailableRawSizes.front(),
-            mAvailableRawSizes.size());
+  ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_RAW_MIN_DURATIONS,
+              kAvailableRawMinDurations,
+              sizeof(kAvailableRawMinDurations) / sizeof(uint64_t));
 
-    ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_RAW_MIN_DURATIONS,
-            kAvailableRawMinDurations,
-            sizeof(kAvailableRawMinDurations)/sizeof(uint64_t));
+  ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES,
+              &mAvailableProcessedSizes.front(),
+              mAvailableProcessedSizes.size());
 
-    ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES,
-            &mAvailableProcessedSizes.front(),
-            mAvailableProcessedSizes.size());
+  ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_PROCESSED_MIN_DURATIONS,
+              kAvailableProcessedMinDurations,
+              sizeof(kAvailableProcessedMinDurations) / sizeof(uint64_t));
 
-    ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_PROCESSED_MIN_DURATIONS,
-            kAvailableProcessedMinDurations,
-            sizeof(kAvailableProcessedMinDurations)/sizeof(uint64_t));
+  ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_JPEG_SIZES, &mAvailableJpegSizes.front(),
+              mAvailableJpegSizes.size());
 
-    ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_JPEG_SIZES,
-            &mAvailableJpegSizes.front(),
-            mAvailableJpegSizes.size());
+  ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_JPEG_MIN_DURATIONS,
+              kAvailableJpegMinDurations,
+              sizeof(kAvailableJpegMinDurations) / sizeof(uint64_t));
 
-    ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_JPEG_MIN_DURATIONS,
-            kAvailableJpegMinDurations,
-            sizeof(kAvailableJpegMinDurations)/sizeof(uint64_t));
+  static const float maxZoom = 10;
+  ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM, &maxZoom, 1);
 
-    static const float maxZoom = 10;
-    ADD_OR_SIZE(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
-            &maxZoom, 1);
+  // android.jpeg
 
-    // android.jpeg
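+  // (width, height) pairs; the 0x0 entry advertises "no thumbnail".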
+  static const int32_t jpegThumbnailSizes[] = {0, 0, 160, 120, 320, 240};
+  ADD_OR_SIZE(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES, jpegThumbnailSizes,
+              sizeof(jpegThumbnailSizes) / sizeof(int32_t));
 
-    static const int32_t jpegThumbnailSizes[] = {
-            0, 0,
-            160, 120,
-            320, 240
-     };
-    ADD_OR_SIZE(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
-            jpegThumbnailSizes, sizeof(jpegThumbnailSizes)/sizeof(int32_t));
+  static const int32_t jpegMaxSize = JpegCompressor::kMaxJpegSize;
+  ADD_OR_SIZE(ANDROID_JPEG_MAX_SIZE, &jpegMaxSize, 1);
 
-    static const int32_t jpegMaxSize = JpegCompressor::kMaxJpegSize;
-    ADD_OR_SIZE(ANDROID_JPEG_MAX_SIZE, &jpegMaxSize, 1);
+  // android.stats
 
-    // android.stats
+  static const uint8_t availableFaceDetectModes[] = {
+      ANDROID_STATISTICS_FACE_DETECT_MODE_OFF,
+      ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE,
+      ANDROID_STATISTICS_FACE_DETECT_MODE_FULL};
 
-    static const uint8_t availableFaceDetectModes[] = {
-        ANDROID_STATISTICS_FACE_DETECT_MODE_OFF,
-        ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE,
-        ANDROID_STATISTICS_FACE_DETECT_MODE_FULL
-    };
+  ADD_OR_SIZE(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
+              availableFaceDetectModes, sizeof(availableFaceDetectModes));
 
-    ADD_OR_SIZE(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
-            availableFaceDetectModes,
-            sizeof(availableFaceDetectModes));
+  static const int32_t maxFaceCount = 8;
+  ADD_OR_SIZE(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT, &maxFaceCount, 1);
 
-    static const int32_t maxFaceCount = 8;
-    ADD_OR_SIZE(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT,
-            &maxFaceCount, 1);
+  static const int32_t histogramSize = 64;
+  ADD_OR_SIZE(ANDROID_STATISTICS_INFO_HISTOGRAM_BUCKET_COUNT, &histogramSize,
+              1);
 
-    static const int32_t histogramSize = 64;
-    ADD_OR_SIZE(ANDROID_STATISTICS_INFO_HISTOGRAM_BUCKET_COUNT,
-            &histogramSize, 1);
+  static const int32_t maxHistogramCount = 1000;
+  ADD_OR_SIZE(ANDROID_STATISTICS_INFO_MAX_HISTOGRAM_COUNT, &maxHistogramCount,
+              1);
 
-    static const int32_t maxHistogramCount = 1000;
-    ADD_OR_SIZE(ANDROID_STATISTICS_INFO_MAX_HISTOGRAM_COUNT,
-            &maxHistogramCount, 1);
+  static const int32_t sharpnessMapSize[2] = {64, 64};
+  ADD_OR_SIZE(ANDROID_STATISTICS_INFO_SHARPNESS_MAP_SIZE, sharpnessMapSize,
+              sizeof(sharpnessMapSize) / sizeof(int32_t));
 
-    static const int32_t sharpnessMapSize[2] = {64, 64};
-    ADD_OR_SIZE(ANDROID_STATISTICS_INFO_SHARPNESS_MAP_SIZE,
-            sharpnessMapSize, sizeof(sharpnessMapSize)/sizeof(int32_t));
+  static const int32_t maxSharpnessMapValue = 1000;
+  ADD_OR_SIZE(ANDROID_STATISTICS_INFO_MAX_SHARPNESS_MAP_VALUE,
+              &maxSharpnessMapValue, 1);
 
-    static const int32_t maxSharpnessMapValue = 1000;
-    ADD_OR_SIZE(ANDROID_STATISTICS_INFO_MAX_SHARPNESS_MAP_VALUE,
-            &maxSharpnessMapValue, 1);
+  // android.control
 
-    // android.control
-
-    static const uint8_t availableSceneModes[] = {
+  static const uint8_t availableSceneModes[] = {
 #if VSOC_PLATFORM_SDK_AFTER(K)
-            ANDROID_CONTROL_SCENE_MODE_DISABLED
+    ANDROID_CONTROL_SCENE_MODE_DISABLED
 #else
-            ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED
+    ANDROID_CONTROL_SCENE_MODE_UNSUPPORTED
 #endif
-    };
-    ADD_OR_SIZE(ANDROID_CONTROL_AVAILABLE_SCENE_MODES,
-            availableSceneModes, sizeof(availableSceneModes));
+  };
+  ADD_OR_SIZE(ANDROID_CONTROL_AVAILABLE_SCENE_MODES, availableSceneModes,
+              sizeof(availableSceneModes));
 
-    static const uint8_t availableEffects[] = {
-            ANDROID_CONTROL_EFFECT_MODE_OFF
-    };
-    ADD_OR_SIZE(ANDROID_CONTROL_AVAILABLE_EFFECTS,
-            availableEffects, sizeof(availableEffects));
+  static const uint8_t availableEffects[] = {ANDROID_CONTROL_EFFECT_MODE_OFF};
+  ADD_OR_SIZE(ANDROID_CONTROL_AVAILABLE_EFFECTS, availableEffects,
+              sizeof(availableEffects));
 
-    static const int32_t max3aRegions[] = {/*AE*/ 0,/*AWB*/ 0,/*AF*/ 0};
-    ADD_OR_SIZE(ANDROID_CONTROL_MAX_REGIONS,
-            max3aRegions, sizeof(max3aRegions)/sizeof(max3aRegions[0]));
+  static const int32_t max3aRegions[] = {/*AE*/ 0, /*AWB*/ 0, /*AF*/ 0};
+  ADD_OR_SIZE(ANDROID_CONTROL_MAX_REGIONS, max3aRegions,
+              sizeof(max3aRegions) / sizeof(max3aRegions[0]));
 
-    static const uint8_t availableAeModes[] = {
-            ANDROID_CONTROL_AE_MODE_OFF,
-            ANDROID_CONTROL_AE_MODE_ON
-    };
-    ADD_OR_SIZE(ANDROID_CONTROL_AE_AVAILABLE_MODES,
-            availableAeModes, sizeof(availableAeModes));
+  static const uint8_t availableAeModes[] = {ANDROID_CONTROL_AE_MODE_OFF,
+                                             ANDROID_CONTROL_AE_MODE_ON};
+  ADD_OR_SIZE(ANDROID_CONTROL_AE_AVAILABLE_MODES, availableAeModes,
+              sizeof(availableAeModes));
 
-    static const camera_metadata_rational exposureCompensationStep = {
-            1, 3
-    };
-    ADD_OR_SIZE(ANDROID_CONTROL_AE_COMPENSATION_STEP,
-            &exposureCompensationStep, 1);
+  static const camera_metadata_rational exposureCompensationStep = {1, 3};
+  ADD_OR_SIZE(ANDROID_CONTROL_AE_COMPENSATION_STEP, &exposureCompensationStep,
+              1);
 
-    int32_t exposureCompensationRange[] = {-9, 9};
-    ADD_OR_SIZE(ANDROID_CONTROL_AE_COMPENSATION_RANGE,
-            exposureCompensationRange,
-            sizeof(exposureCompensationRange)/sizeof(int32_t));
+  int32_t exposureCompensationRange[] = {-9, 9};
+  ADD_OR_SIZE(ANDROID_CONTROL_AE_COMPENSATION_RANGE, exposureCompensationRange,
+              sizeof(exposureCompensationRange) / sizeof(int32_t));
 
-    static const int32_t availableTargetFpsRanges[] = {
-            5, 30, 15, 30
-    };
-    ADD_OR_SIZE(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
-            availableTargetFpsRanges,
-            sizeof(availableTargetFpsRanges)/sizeof(int32_t));
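+  // Flattened (min fps, max fps) pairs.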
+  static const int32_t availableTargetFpsRanges[] = {5, 30, 15, 30};
+  ADD_OR_SIZE(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
+              availableTargetFpsRanges,
+              sizeof(availableTargetFpsRanges) / sizeof(int32_t));
 
-    static const uint8_t availableAntibandingModes[] = {
-            ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF,
-            ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO
-    };
-    ADD_OR_SIZE(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
-            availableAntibandingModes, sizeof(availableAntibandingModes));
+  static const uint8_t availableAntibandingModes[] = {
+      ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF,
+      ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO};
+  ADD_OR_SIZE(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
+              availableAntibandingModes, sizeof(availableAntibandingModes));
 
-    static const uint8_t availableAwbModes[] = {
-            ANDROID_CONTROL_AWB_MODE_OFF,
-            ANDROID_CONTROL_AWB_MODE_AUTO,
-            ANDROID_CONTROL_AWB_MODE_INCANDESCENT,
-            ANDROID_CONTROL_AWB_MODE_FLUORESCENT,
-            ANDROID_CONTROL_AWB_MODE_DAYLIGHT,
-            ANDROID_CONTROL_AWB_MODE_SHADE
-    };
-    ADD_OR_SIZE(ANDROID_CONTROL_AWB_AVAILABLE_MODES,
-            availableAwbModes, sizeof(availableAwbModes));
+  static const uint8_t availableAwbModes[] = {
+      ANDROID_CONTROL_AWB_MODE_OFF,
+      ANDROID_CONTROL_AWB_MODE_AUTO,
+      ANDROID_CONTROL_AWB_MODE_INCANDESCENT,
+      ANDROID_CONTROL_AWB_MODE_FLUORESCENT,
+      ANDROID_CONTROL_AWB_MODE_DAYLIGHT,
+      ANDROID_CONTROL_AWB_MODE_SHADE};
+  ADD_OR_SIZE(ANDROID_CONTROL_AWB_AVAILABLE_MODES, availableAwbModes,
+              sizeof(availableAwbModes));
 
-    static const uint8_t availableAfModesBack[] = {
-            ANDROID_CONTROL_AF_MODE_OFF,
-            ANDROID_CONTROL_AF_MODE_AUTO,
-            ANDROID_CONTROL_AF_MODE_MACRO,
-            ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO,
-            ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE
-    };
+  static const uint8_t availableAfModesBack[] = {
+      ANDROID_CONTROL_AF_MODE_OFF, ANDROID_CONTROL_AF_MODE_AUTO,
+      ANDROID_CONTROL_AF_MODE_MACRO, ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO,
+      ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE};
 
-    static const uint8_t availableAfModesFront[] = {
-            ANDROID_CONTROL_AF_MODE_OFF
-    };
+  static const uint8_t availableAfModesFront[] = {ANDROID_CONTROL_AF_MODE_OFF};
 
-    if (mFacingBack) {
-        ADD_OR_SIZE(ANDROID_CONTROL_AF_AVAILABLE_MODES,
-                    availableAfModesBack, sizeof(availableAfModesBack));
-    } else {
-        ADD_OR_SIZE(ANDROID_CONTROL_AF_AVAILABLE_MODES,
-                    availableAfModesFront, sizeof(availableAfModesFront));
-    }
+  if (mFacingBack) {
+    ADD_OR_SIZE(ANDROID_CONTROL_AF_AVAILABLE_MODES, availableAfModesBack,
+                sizeof(availableAfModesBack));
+  } else {
+    ADD_OR_SIZE(ANDROID_CONTROL_AF_AVAILABLE_MODES, availableAfModesFront,
+                sizeof(availableAfModesFront));
+  }
 
-    static const uint8_t availableVstabModes[] = {
-            ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF
-    };
-    ADD_OR_SIZE(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
-            availableVstabModes, sizeof(availableVstabModes));
+  static const uint8_t availableVstabModes[] = {
+      ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF};
+  ADD_OR_SIZE(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
+              availableVstabModes, sizeof(availableVstabModes));
 
 #undef ADD_OR_SIZE
-    /** Allocate metadata if sizing */
-    if (sizeRequest) {
-        ALOGV("Allocating %zu entries, %zu extra bytes for "
-                "static camera info",
-                entryCount, dataCount);
-        *info = allocate_camera_metadata(entryCount, dataCount);
-        if (*info == NULL) {
-            ALOGE("Unable to allocate camera static info"
-                    "(%zu entries, %zu bytes extra data)",
-                    entryCount, dataCount);
-            return NO_MEMORY;
-        }
+  /** Allocate metadata if sizing */
+  if (sizeRequest) {
+    ALOGV(
+        "Allocating %zu entries, %zu extra bytes for "
+        "static camera info",
+        entryCount, dataCount);
+    *info = allocate_camera_metadata(entryCount, dataCount);
+    if (*info == NULL) {
+      ALOGE(
+          "Unable to allocate camera static info"
+          "(%zu entries, %zu bytes extra data)",
+          entryCount, dataCount);
+      return NO_MEMORY;
     }
-    return OK;
+  }
+  return OK;
 }
 
 status_t EmulatedFakeCamera2::constructDefaultRequest(
-        int request_template,
-        camera_metadata_t **request,
-        bool sizeRequest) const {
+    int request_template, camera_metadata_t **request, bool sizeRequest) const {
+  size_t entryCount = 0;
+  size_t dataCount = 0;
+  status_t ret;
 
-    size_t entryCount = 0;
-    size_t dataCount = 0;
-    status_t ret;
+#define ADD_OR_SIZE(tag, data, count)                                       \
+  if ((ret = addOrSize(*request, sizeRequest, &entryCount, &dataCount, tag, \
+                       data, count)) != OK)                                 \
+  return ret
 
-#define ADD_OR_SIZE( tag, data, count ) \
-    if ( ( ret = addOrSize(*request, sizeRequest, &entryCount, &dataCount, \
-            tag, data, count) ) != OK ) return ret
+  /** android.request */
 
-    /** android.request */
+  static const uint8_t requestType = ANDROID_REQUEST_TYPE_CAPTURE;
+  ADD_OR_SIZE(ANDROID_REQUEST_TYPE, &requestType, 1);
 
-    static const uint8_t requestType = ANDROID_REQUEST_TYPE_CAPTURE;
-    ADD_OR_SIZE(ANDROID_REQUEST_TYPE, &requestType, 1);
+  static const uint8_t metadataMode = ANDROID_REQUEST_METADATA_MODE_FULL;
+  ADD_OR_SIZE(ANDROID_REQUEST_METADATA_MODE, &metadataMode, 1);
 
-    static const uint8_t metadataMode = ANDROID_REQUEST_METADATA_MODE_FULL;
-    ADD_OR_SIZE(ANDROID_REQUEST_METADATA_MODE, &metadataMode, 1);
+  static const int32_t id = 0;
+  ADD_OR_SIZE(ANDROID_REQUEST_ID, &id, 1);
 
-    static const int32_t id = 0;
-    ADD_OR_SIZE(ANDROID_REQUEST_ID, &id, 1);
+  static const int32_t frameCount = 0;
+  ADD_OR_SIZE(ANDROID_REQUEST_FRAME_COUNT, &frameCount, 1);
 
-    static const int32_t frameCount = 0;
-    ADD_OR_SIZE(ANDROID_REQUEST_FRAME_COUNT, &frameCount, 1);
+  // OUTPUT_STREAMS set by user
+  entryCount += 1;
+  dataCount += 5;  // TODO: Should be maximum stream number
 
-    // OUTPUT_STREAMS set by user
-    entryCount += 1;
-    dataCount += 5; // TODO: Should be maximum stream number
+  /** android.lens */
 
-    /** android.lens */
+  static const float focusDistance = 0;
+  ADD_OR_SIZE(ANDROID_LENS_FOCUS_DISTANCE, &focusDistance, 1);
 
-    static const float focusDistance = 0;
-    ADD_OR_SIZE(ANDROID_LENS_FOCUS_DISTANCE, &focusDistance, 1);
+  static const float aperture = 2.8f;
+  ADD_OR_SIZE(ANDROID_LENS_APERTURE, &aperture, 1);
 
-    static const float aperture = 2.8f;
-    ADD_OR_SIZE(ANDROID_LENS_APERTURE, &aperture, 1);
+  static const float focalLength = 5.0f;
+  ADD_OR_SIZE(ANDROID_LENS_FOCAL_LENGTH, &focalLength, 1);
 
-    static const float focalLength = 5.0f;
-    ADD_OR_SIZE(ANDROID_LENS_FOCAL_LENGTH, &focalLength, 1);
+  static const float filterDensity = 0;
+  ADD_OR_SIZE(ANDROID_LENS_FILTER_DENSITY, &filterDensity, 1);
 
-    static const float filterDensity = 0;
-    ADD_OR_SIZE(ANDROID_LENS_FILTER_DENSITY, &filterDensity, 1);
+  static const uint8_t opticalStabilizationMode =
+      ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+  ADD_OR_SIZE(ANDROID_LENS_OPTICAL_STABILIZATION_MODE,
+              &opticalStabilizationMode, 1);
 
-    static const uint8_t opticalStabilizationMode =
-            ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
-    ADD_OR_SIZE(ANDROID_LENS_OPTICAL_STABILIZATION_MODE,
-            &opticalStabilizationMode, 1);
+  // FOCUS_RANGE set only in frame
 
-    // FOCUS_RANGE set only in frame
+  /** android.sensor */
 
-    /** android.sensor */
+  static const int64_t exposureTime = 10 * MSEC;
+  ADD_OR_SIZE(ANDROID_SENSOR_EXPOSURE_TIME, &exposureTime, 1);
 
-    static const int64_t exposureTime = 10 * MSEC;
-    ADD_OR_SIZE(ANDROID_SENSOR_EXPOSURE_TIME, &exposureTime, 1);
+  static const int64_t frameDuration = 33333333L;  // 1/30 s
+  ADD_OR_SIZE(ANDROID_SENSOR_FRAME_DURATION, &frameDuration, 1);
 
-    static const int64_t frameDuration = 33333333L; // 1/30 s
-    ADD_OR_SIZE(ANDROID_SENSOR_FRAME_DURATION, &frameDuration, 1);
+  static const int32_t sensitivity = 100;
+  ADD_OR_SIZE(ANDROID_SENSOR_SENSITIVITY, &sensitivity, 1);
 
-    static const int32_t sensitivity = 100;
-    ADD_OR_SIZE(ANDROID_SENSOR_SENSITIVITY, &sensitivity, 1);
+  // TIMESTAMP set only in frame
 
-    // TIMESTAMP set only in frame
+  /** android.flash */
 
-    /** android.flash */
+  static const uint8_t flashMode = ANDROID_FLASH_MODE_OFF;
+  ADD_OR_SIZE(ANDROID_FLASH_MODE, &flashMode, 1);
 
-    static const uint8_t flashMode = ANDROID_FLASH_MODE_OFF;
-    ADD_OR_SIZE(ANDROID_FLASH_MODE, &flashMode, 1);
+  static const uint8_t flashPower = 10;
+  ADD_OR_SIZE(ANDROID_FLASH_FIRING_POWER, &flashPower, 1);
 
-    static const uint8_t flashPower = 10;
-    ADD_OR_SIZE(ANDROID_FLASH_FIRING_POWER, &flashPower, 1);
+  static const int64_t firingTime = 0;
+  ADD_OR_SIZE(ANDROID_FLASH_FIRING_TIME, &firingTime, 1);
 
-    static const int64_t firingTime = 0;
-    ADD_OR_SIZE(ANDROID_FLASH_FIRING_TIME, &firingTime, 1);
+  /** Processing block modes */
+  uint8_t hotPixelMode = 0;
+  uint8_t demosaicMode = 0;
+  uint8_t noiseMode = 0;
+  uint8_t shadingMode = 0;
+  uint8_t colorMode = 0;
+  uint8_t tonemapMode = 0;
+  uint8_t edgeMode = 0;
+  switch (request_template) {
+    case CAMERA2_TEMPLATE_STILL_CAPTURE:
+      // fall-through
+    case CAMERA2_TEMPLATE_VIDEO_SNAPSHOT:
+      // fall-through
+    case CAMERA2_TEMPLATE_ZERO_SHUTTER_LAG:
+      hotPixelMode = ANDROID_HOT_PIXEL_MODE_HIGH_QUALITY;
+      demosaicMode = ANDROID_DEMOSAIC_MODE_HIGH_QUALITY;
+      noiseMode = ANDROID_NOISE_REDUCTION_MODE_HIGH_QUALITY;
+      shadingMode = ANDROID_SHADING_MODE_HIGH_QUALITY;
+      colorMode = ANDROID_COLOR_CORRECTION_MODE_HIGH_QUALITY;
+      tonemapMode = ANDROID_TONEMAP_MODE_HIGH_QUALITY;
+      edgeMode = ANDROID_EDGE_MODE_HIGH_QUALITY;
+      break;
+    case CAMERA2_TEMPLATE_PREVIEW:
+      // fall-through
+    case CAMERA2_TEMPLATE_VIDEO_RECORD:
+      // fall-through
+    default:
+      hotPixelMode = ANDROID_HOT_PIXEL_MODE_FAST;
+      demosaicMode = ANDROID_DEMOSAIC_MODE_FAST;
+      noiseMode = ANDROID_NOISE_REDUCTION_MODE_FAST;
+      shadingMode = ANDROID_SHADING_MODE_FAST;
+      colorMode = ANDROID_COLOR_CORRECTION_MODE_FAST;
+      tonemapMode = ANDROID_TONEMAP_MODE_FAST;
+      edgeMode = ANDROID_EDGE_MODE_FAST;
+      break;
+  }
+  ADD_OR_SIZE(ANDROID_HOT_PIXEL_MODE, &hotPixelMode, 1);
+  ADD_OR_SIZE(ANDROID_DEMOSAIC_MODE, &demosaicMode, 1);
+  ADD_OR_SIZE(ANDROID_NOISE_REDUCTION_MODE, &noiseMode, 1);
+  ADD_OR_SIZE(ANDROID_SHADING_MODE, &shadingMode, 1);
+  ADD_OR_SIZE(ANDROID_COLOR_CORRECTION_MODE, &colorMode, 1);
+  ADD_OR_SIZE(ANDROID_TONEMAP_MODE, &tonemapMode, 1);
+  ADD_OR_SIZE(ANDROID_EDGE_MODE, &edgeMode, 1);
 
-    /** Processing block modes */
-    uint8_t hotPixelMode = 0;
-    uint8_t demosaicMode = 0;
-    uint8_t noiseMode = 0;
-    uint8_t shadingMode = 0;
-    uint8_t colorMode = 0;
-    uint8_t tonemapMode = 0;
-    uint8_t edgeMode = 0;
-    switch (request_template) {
-      case CAMERA2_TEMPLATE_STILL_CAPTURE:
-        // fall-through
-      case CAMERA2_TEMPLATE_VIDEO_SNAPSHOT:
-        // fall-through
-      case CAMERA2_TEMPLATE_ZERO_SHUTTER_LAG:
-        hotPixelMode = ANDROID_HOT_PIXEL_MODE_HIGH_QUALITY;
-        demosaicMode = ANDROID_DEMOSAIC_MODE_HIGH_QUALITY;
-        noiseMode = ANDROID_NOISE_REDUCTION_MODE_HIGH_QUALITY;
-        shadingMode = ANDROID_SHADING_MODE_HIGH_QUALITY;
-        colorMode = ANDROID_COLOR_CORRECTION_MODE_HIGH_QUALITY;
-        tonemapMode = ANDROID_TONEMAP_MODE_HIGH_QUALITY;
-        edgeMode = ANDROID_EDGE_MODE_HIGH_QUALITY;
-        break;
-      case CAMERA2_TEMPLATE_PREVIEW:
-        // fall-through
-      case CAMERA2_TEMPLATE_VIDEO_RECORD:
-        // fall-through
-      default:
-        hotPixelMode = ANDROID_HOT_PIXEL_MODE_FAST;
-        demosaicMode = ANDROID_DEMOSAIC_MODE_FAST;
-        noiseMode = ANDROID_NOISE_REDUCTION_MODE_FAST;
-        shadingMode = ANDROID_SHADING_MODE_FAST;
-        colorMode = ANDROID_COLOR_CORRECTION_MODE_FAST;
-        tonemapMode = ANDROID_TONEMAP_MODE_FAST;
-        edgeMode = ANDROID_EDGE_MODE_FAST;
-        break;
+  /** android.noise */
+  static const uint8_t noiseStrength = 5;
+  ADD_OR_SIZE(ANDROID_NOISE_REDUCTION_STRENGTH, &noiseStrength, 1);
+
+  /** android.color */
+  static const float colorTransform[9] = {1.0f, 0.f, 0.f, 0.f, 1.f,
+                                          0.f,  0.f, 0.f, 1.f};
+  ADD_OR_SIZE(ANDROID_COLOR_CORRECTION_TRANSFORM, colorTransform, 9);
+
+  /** android.tonemap */
+  static const float tonemapCurve[4] = {0.f, 0.f, 1.f, 1.f};
+  ADD_OR_SIZE(ANDROID_TONEMAP_CURVE_RED, tonemapCurve, 4);
+  ADD_OR_SIZE(ANDROID_TONEMAP_CURVE_GREEN, tonemapCurve, 4);
+  ADD_OR_SIZE(ANDROID_TONEMAP_CURVE_BLUE, tonemapCurve, 4);
+
+  /** android.edge */
+  static const uint8_t edgeStrength = 5;
+  ADD_OR_SIZE(ANDROID_EDGE_STRENGTH, &edgeStrength, 1);
+
+  /** android.scaler */
+  static const int32_t cropRegion[3] = {0, 0,
+                                        static_cast<int32_t>(mSensorWidth)};
+  ADD_OR_SIZE(ANDROID_SCALER_CROP_REGION, cropRegion, 3);
+
+  /** android.jpeg */
+  static const int32_t jpegQuality = 80;
+  ADD_OR_SIZE(ANDROID_JPEG_QUALITY, &jpegQuality, 1);
+
+  static const int32_t thumbnailSize[2] = {640, 480};
+  ADD_OR_SIZE(ANDROID_JPEG_THUMBNAIL_SIZE, thumbnailSize, 2);
+
+  static const int32_t thumbnailQuality = 80;
+  ADD_OR_SIZE(ANDROID_JPEG_THUMBNAIL_QUALITY, &thumbnailQuality, 1);
+
+  static const double gpsCoordinates[2] = {0, 0};
+  ADD_OR_SIZE(ANDROID_JPEG_GPS_COORDINATES, gpsCoordinates, 2);
+
+  static const uint8_t gpsProcessingMethod[32] = "None";
+  ADD_OR_SIZE(ANDROID_JPEG_GPS_PROCESSING_METHOD, gpsProcessingMethod, 32);
+
+  static const int64_t gpsTimestamp = 0;
+  ADD_OR_SIZE(ANDROID_JPEG_GPS_TIMESTAMP, &gpsTimestamp, 1);
+
+  static const int32_t jpegOrientation = 0;
+  ADD_OR_SIZE(ANDROID_JPEG_ORIENTATION, &jpegOrientation, 1);
+
+  /** android.stats */
+
+  static const uint8_t faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
+  ADD_OR_SIZE(ANDROID_STATISTICS_FACE_DETECT_MODE, &faceDetectMode, 1);
+
+  static const uint8_t histogramMode = ANDROID_STATISTICS_HISTOGRAM_MODE_OFF;
+  ADD_OR_SIZE(ANDROID_STATISTICS_HISTOGRAM_MODE, &histogramMode, 1);
+
+  static const uint8_t sharpnessMapMode =
+      ANDROID_STATISTICS_SHARPNESS_MAP_MODE_OFF;
+  ADD_OR_SIZE(ANDROID_STATISTICS_SHARPNESS_MAP_MODE, &sharpnessMapMode, 1);
+
+  // faceRectangles, faceScores, faceLandmarks, faceIds, histogram,
+  // sharpnessMap only in frames
+
+  /** android.control */
+
+  uint8_t controlIntent = 0;
+  switch (request_template) {
+    case CAMERA2_TEMPLATE_PREVIEW:
+      controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
+      break;
+    case CAMERA2_TEMPLATE_STILL_CAPTURE:
+      controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE;
+      break;
+    case CAMERA2_TEMPLATE_VIDEO_RECORD:
+      controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD;
+      break;
+    case CAMERA2_TEMPLATE_VIDEO_SNAPSHOT:
+      controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT;
+      break;
+    case CAMERA2_TEMPLATE_ZERO_SHUTTER_LAG:
+      controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG;
+      break;
+    default:
+      controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_CUSTOM;
+      break;
+  }
+  ADD_OR_SIZE(ANDROID_CONTROL_CAPTURE_INTENT, &controlIntent, 1);
+
+  static const uint8_t controlMode = ANDROID_CONTROL_MODE_AUTO;
+  ADD_OR_SIZE(ANDROID_CONTROL_MODE, &controlMode, 1);
+
+  static const uint8_t effectMode = ANDROID_CONTROL_EFFECT_MODE_OFF;
+  ADD_OR_SIZE(ANDROID_CONTROL_EFFECT_MODE, &effectMode, 1);
+
+  static const uint8_t sceneMode = ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY;
+  ADD_OR_SIZE(ANDROID_CONTROL_SCENE_MODE, &sceneMode, 1);
+
+  static const uint8_t aeMode = ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH;
+  ADD_OR_SIZE(ANDROID_CONTROL_AE_MODE, &aeMode, 1);
+
+  static const uint8_t aeLock = ANDROID_CONTROL_AE_LOCK_OFF;
+  ADD_OR_SIZE(ANDROID_CONTROL_AE_LOCK, &aeLock, 1);
+
+  static const int32_t controlRegions[5] = {
+      0, 0, static_cast<int32_t>(mSensorWidth),
+      static_cast<int32_t>(mSensorHeight), 1000};
+  ADD_OR_SIZE(ANDROID_CONTROL_AE_REGIONS, controlRegions, 5);
+
+  static const int32_t aeExpCompensation = 0;
+  ADD_OR_SIZE(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, &aeExpCompensation, 1);
+
+  static const int32_t aeTargetFpsRange[2] = {10, 30};
+  ADD_OR_SIZE(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, aeTargetFpsRange, 2);
+
+  static const uint8_t aeAntibandingMode =
+      ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO;
+  ADD_OR_SIZE(ANDROID_CONTROL_AE_ANTIBANDING_MODE, &aeAntibandingMode, 1);
+
+  static const uint8_t awbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
+  ADD_OR_SIZE(ANDROID_CONTROL_AWB_MODE, &awbMode, 1);
+
+  static const uint8_t awbLock = ANDROID_CONTROL_AWB_LOCK_OFF;
+  ADD_OR_SIZE(ANDROID_CONTROL_AWB_LOCK, &awbLock, 1);
+
+  ADD_OR_SIZE(ANDROID_CONTROL_AWB_REGIONS, controlRegions, 5);
+
+  uint8_t afMode = 0;
+  switch (request_template) {
+    case CAMERA2_TEMPLATE_PREVIEW:
+      afMode = ANDROID_CONTROL_AF_MODE_AUTO;
+      break;
+    case CAMERA2_TEMPLATE_STILL_CAPTURE:
+      afMode = ANDROID_CONTROL_AF_MODE_AUTO;
+      break;
+    case CAMERA2_TEMPLATE_VIDEO_RECORD:
+      afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
+      break;
+    case CAMERA2_TEMPLATE_VIDEO_SNAPSHOT:
+      afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
+      break;
+    case CAMERA2_TEMPLATE_ZERO_SHUTTER_LAG:
+      afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
+      break;
+    default:
+      afMode = ANDROID_CONTROL_AF_MODE_AUTO;
+      break;
+  }
+  ADD_OR_SIZE(ANDROID_CONTROL_AF_MODE, &afMode, 1);
+
+  ADD_OR_SIZE(ANDROID_CONTROL_AF_REGIONS, controlRegions, 5);
+
+  static const uint8_t vstabMode = ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF;
+  ADD_OR_SIZE(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, &vstabMode, 1);
+
+  // aeState, awbState, afState only in frame
+
+  /** Allocate metadata if sizing */
+  if (sizeRequest) {
+    ALOGV(
+        "Allocating %zu entries, %zu extra bytes for "
+        "request template type %d",
+        entryCount, dataCount, request_template);
+    *request = allocate_camera_metadata(entryCount, dataCount);
+    if (*request == NULL) {
+      ALOGE(
+          "Unable to allocate new request template type %d "
+          "(%zu entries, %zu bytes extra data)",
+          request_template, entryCount, dataCount);
+      return NO_MEMORY;
     }
-    ADD_OR_SIZE(ANDROID_HOT_PIXEL_MODE, &hotPixelMode, 1);
-    ADD_OR_SIZE(ANDROID_DEMOSAIC_MODE, &demosaicMode, 1);
-    ADD_OR_SIZE(ANDROID_NOISE_REDUCTION_MODE, &noiseMode, 1);
-    ADD_OR_SIZE(ANDROID_SHADING_MODE, &shadingMode, 1);
-    ADD_OR_SIZE(ANDROID_COLOR_CORRECTION_MODE, &colorMode, 1);
-    ADD_OR_SIZE(ANDROID_TONEMAP_MODE, &tonemapMode, 1);
-    ADD_OR_SIZE(ANDROID_EDGE_MODE, &edgeMode, 1);
-
-    /** android.noise */
-    static const uint8_t noiseStrength = 5;
-    ADD_OR_SIZE(ANDROID_NOISE_REDUCTION_STRENGTH, &noiseStrength, 1);
-
-    /** android.color */
-    static const float colorTransform[9] = {
-        1.0f, 0.f, 0.f,
-        0.f, 1.f, 0.f,
-        0.f, 0.f, 1.f
-    };
-    ADD_OR_SIZE(ANDROID_COLOR_CORRECTION_TRANSFORM, colorTransform, 9);
-
-    /** android.tonemap */
-    static const float tonemapCurve[4] = {
-        0.f, 0.f,
-        1.f, 1.f
-    };
-    ADD_OR_SIZE(ANDROID_TONEMAP_CURVE_RED, tonemapCurve, 4);
-    ADD_OR_SIZE(ANDROID_TONEMAP_CURVE_GREEN, tonemapCurve, 4);
-    ADD_OR_SIZE(ANDROID_TONEMAP_CURVE_BLUE, tonemapCurve, 4);
-
-    /** android.edge */
-    static const uint8_t edgeStrength = 5;
-    ADD_OR_SIZE(ANDROID_EDGE_STRENGTH, &edgeStrength, 1);
-
-    /** android.scaler */
-    static const int32_t cropRegion[3] = {
-        0, 0, static_cast<int32_t>(mSensorWidth)
-    };
-    ADD_OR_SIZE(ANDROID_SCALER_CROP_REGION, cropRegion, 3);
-
-    /** android.jpeg */
-    static const int32_t jpegQuality = 80;
-    ADD_OR_SIZE(ANDROID_JPEG_QUALITY, &jpegQuality, 1);
-
-    static const int32_t thumbnailSize[2] = {
-        640, 480
-    };
-    ADD_OR_SIZE(ANDROID_JPEG_THUMBNAIL_SIZE, thumbnailSize, 2);
-
-    static const int32_t thumbnailQuality = 80;
-    ADD_OR_SIZE(ANDROID_JPEG_THUMBNAIL_QUALITY, &thumbnailQuality, 1);
-
-    static const double gpsCoordinates[2] = {
-        0, 0
-    };
-    ADD_OR_SIZE(ANDROID_JPEG_GPS_COORDINATES, gpsCoordinates, 2);
-
-    static const uint8_t gpsProcessingMethod[32] = "None";
-    ADD_OR_SIZE(ANDROID_JPEG_GPS_PROCESSING_METHOD, gpsProcessingMethod, 32);
-
-    static const int64_t gpsTimestamp = 0;
-    ADD_OR_SIZE(ANDROID_JPEG_GPS_TIMESTAMP, &gpsTimestamp, 1);
-
-    static const int32_t jpegOrientation = 0;
-    ADD_OR_SIZE(ANDROID_JPEG_ORIENTATION, &jpegOrientation, 1);
-
-    /** android.stats */
-
-    static const uint8_t faceDetectMode =
-        ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
-    ADD_OR_SIZE(ANDROID_STATISTICS_FACE_DETECT_MODE, &faceDetectMode, 1);
-
-    static const uint8_t histogramMode = ANDROID_STATISTICS_HISTOGRAM_MODE_OFF;
-    ADD_OR_SIZE(ANDROID_STATISTICS_HISTOGRAM_MODE, &histogramMode, 1);
-
-    static const uint8_t sharpnessMapMode =
-        ANDROID_STATISTICS_SHARPNESS_MAP_MODE_OFF;
-    ADD_OR_SIZE(ANDROID_STATISTICS_SHARPNESS_MAP_MODE, &sharpnessMapMode, 1);
-
-    // faceRectangles, faceScores, faceLandmarks, faceIds, histogram,
-    // sharpnessMap only in frames
-
-    /** android.control */
-
-    uint8_t controlIntent = 0;
-    switch (request_template) {
-      case CAMERA2_TEMPLATE_PREVIEW:
-        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
-        break;
-      case CAMERA2_TEMPLATE_STILL_CAPTURE:
-        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE;
-        break;
-      case CAMERA2_TEMPLATE_VIDEO_RECORD:
-        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD;
-        break;
-      case CAMERA2_TEMPLATE_VIDEO_SNAPSHOT:
-        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT;
-        break;
-      case CAMERA2_TEMPLATE_ZERO_SHUTTER_LAG:
-        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG;
-        break;
-      default:
-        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_CUSTOM;
-        break;
-    }
-    ADD_OR_SIZE(ANDROID_CONTROL_CAPTURE_INTENT, &controlIntent, 1);
-
-    static const uint8_t controlMode = ANDROID_CONTROL_MODE_AUTO;
-    ADD_OR_SIZE(ANDROID_CONTROL_MODE, &controlMode, 1);
-
-    static const uint8_t effectMode = ANDROID_CONTROL_EFFECT_MODE_OFF;
-    ADD_OR_SIZE(ANDROID_CONTROL_EFFECT_MODE, &effectMode, 1);
-
-    static const uint8_t sceneMode = ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY;
-    ADD_OR_SIZE(ANDROID_CONTROL_SCENE_MODE, &sceneMode, 1);
-
-    static const uint8_t aeMode = ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH;
-    ADD_OR_SIZE(ANDROID_CONTROL_AE_MODE, &aeMode, 1);
-
-    static const uint8_t aeLock = ANDROID_CONTROL_AE_LOCK_OFF;
-    ADD_OR_SIZE(ANDROID_CONTROL_AE_LOCK, &aeLock, 1);
-
-    static const int32_t controlRegions[5] = {
-        0, 0,
-        static_cast<int32_t>(mSensorWidth),
-        static_cast<int32_t>(mSensorHeight),
-        1000
-    };
-    ADD_OR_SIZE(ANDROID_CONTROL_AE_REGIONS, controlRegions, 5);
-
-    static const int32_t aeExpCompensation = 0;
-    ADD_OR_SIZE(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, &aeExpCompensation, 1);
-
-    static const int32_t aeTargetFpsRange[2] = {
-        10, 30
-    };
-    ADD_OR_SIZE(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, aeTargetFpsRange, 2);
-
-    static const uint8_t aeAntibandingMode =
-            ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO;
-    ADD_OR_SIZE(ANDROID_CONTROL_AE_ANTIBANDING_MODE, &aeAntibandingMode, 1);
-
-    static const uint8_t awbMode =
-            ANDROID_CONTROL_AWB_MODE_AUTO;
-    ADD_OR_SIZE(ANDROID_CONTROL_AWB_MODE, &awbMode, 1);
-
-    static const uint8_t awbLock = ANDROID_CONTROL_AWB_LOCK_OFF;
-    ADD_OR_SIZE(ANDROID_CONTROL_AWB_LOCK, &awbLock, 1);
-
-    ADD_OR_SIZE(ANDROID_CONTROL_AWB_REGIONS, controlRegions, 5);
-
-    uint8_t afMode = 0;
-    switch (request_template) {
-      case CAMERA2_TEMPLATE_PREVIEW:
-        afMode = ANDROID_CONTROL_AF_MODE_AUTO;
-        break;
-      case CAMERA2_TEMPLATE_STILL_CAPTURE:
-        afMode = ANDROID_CONTROL_AF_MODE_AUTO;
-        break;
-      case CAMERA2_TEMPLATE_VIDEO_RECORD:
-        afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
-        break;
-      case CAMERA2_TEMPLATE_VIDEO_SNAPSHOT:
-        afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
-        break;
-      case CAMERA2_TEMPLATE_ZERO_SHUTTER_LAG:
-        afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
-        break;
-      default:
-        afMode = ANDROID_CONTROL_AF_MODE_AUTO;
-        break;
-    }
-    ADD_OR_SIZE(ANDROID_CONTROL_AF_MODE, &afMode, 1);
-
-    ADD_OR_SIZE(ANDROID_CONTROL_AF_REGIONS, controlRegions, 5);
-
-    static const uint8_t vstabMode =
-        ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF;
-    ADD_OR_SIZE(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, &vstabMode, 1);
-
-    // aeState, awbState, afState only in frame
-
-    /** Allocate metadata if sizing */
-    if (sizeRequest) {
-        ALOGV("Allocating %zu entries, %zu extra bytes for "
-                "request template type %d",
-                entryCount, dataCount, request_template);
-        *request = allocate_camera_metadata(entryCount, dataCount);
-        if (*request == NULL) {
-            ALOGE("Unable to allocate new request template type %d "
-                    "(%zu entries, %zu bytes extra data)", request_template,
-                    entryCount, dataCount);
-            return NO_MEMORY;
-        }
-    }
-    return OK;
+  }
+  return OK;
 #undef ADD_OR_SIZE
 }
 
 status_t EmulatedFakeCamera2::addOrSize(camera_metadata_t *request,
-        bool sizeRequest,
-        size_t *entryCount,
-        size_t *dataCount,
-        uint32_t tag,
-        const void *entryData,
-        size_t entryDataCount) {
-    status_t res;
-    if (!sizeRequest) {
-        return add_camera_metadata_entry(request, tag, entryData,
-                entryDataCount);
-    } else {
-        int type = get_camera_metadata_tag_type(tag);
-        if (type < 0 ) return BAD_VALUE;
-        (*entryCount)++;
-        (*dataCount) += calculate_camera_metadata_entry_data_size(type,
-                entryDataCount);
-        return OK;
-    }
+                                        bool sizeRequest, size_t *entryCount,
+                                        size_t *dataCount, uint32_t tag,
+                                        const void *entryData,
+                                        size_t entryDataCount) {
+  status_t res;
+  if (!sizeRequest) {
+    return add_camera_metadata_entry(request, tag, entryData, entryDataCount);
+  } else {
+    int type = get_camera_metadata_tag_type(tag);
+    if (type < 0) return BAD_VALUE;
+    (*entryCount)++;
+    (*dataCount) +=
+        calculate_camera_metadata_entry_data_size(type, entryDataCount);
+    return OK;
+  }
 }
 
 bool EmulatedFakeCamera2::isStreamInUse(uint32_t id) {
-    // Assumes mMutex is locked; otherwise new requests could enter
-    // configureThread while readoutThread is being checked
+  // Assumes mMutex is locked; otherwise new requests could enter
+  // configureThread while readoutThread is being checked
 
-    // Order of isStreamInUse calls matters
-    if (mConfigureThread->isStreamInUse(id) ||
-            mReadoutThread->isStreamInUse(id) ||
-            mJpegCompressor->isStreamInUse(id) ) {
-        ALOGE("%s: Stream %d is in use in active requests!",
-                __FUNCTION__, id);
-        return true;
-    }
-    return false;
+  // Order of isStreamInUse calls matters
+  if (mConfigureThread->isStreamInUse(id) ||
+      mReadoutThread->isStreamInUse(id) || mJpegCompressor->isStreamInUse(id)) {
+    ALOGE("%s: Stream %d is in use in active requests!", __FUNCTION__, id);
+    return true;
+  }
+  return false;
 }
 
 bool EmulatedFakeCamera2::isReprocessStreamInUse(uint32_t id) {
-    // TODO: implement
-    return false;
+  // TODO: implement
+  return false;
 }
 
-const Stream& EmulatedFakeCamera2::getStreamInfo(uint32_t streamId) {
-    Mutex::Autolock lock(mMutex);
+const Stream &EmulatedFakeCamera2::getStreamInfo(uint32_t streamId) {
+  Mutex::Autolock lock(mMutex);
 
-    return mStreams.valueFor(streamId);
+  return mStreams.valueFor(streamId);
 }
 
-const ReprocessStream& EmulatedFakeCamera2::getReprocessStreamInfo(uint32_t streamId) {
-    Mutex::Autolock lock(mMutex);
+const ReprocessStream &EmulatedFakeCamera2::getReprocessStreamInfo(
+    uint32_t streamId) {
+  Mutex::Autolock lock(mMutex);
 
-    return mReprocessStreams.valueFor(streamId);
+  return mReprocessStreams.valueFor(streamId);
 }
 
-};  /* namespace android */
+}; /* namespace android */
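
Note on the code above: constructStaticInfo() and constructDefaultRequest()
use a two-pass "add or size" idiom. Each function runs once with
sizeRequest == true, where ADD_OR_SIZE only accumulates entryCount and
dataCount so allocate_camera_metadata() can be called with an exact size, and
once with sizeRequest == false, where the very same macro invocations append
the entries into the freshly allocated camera_metadata_t. A minimal
self-contained sketch of the idiom follows; the Entry/Metadata types and the
tag value are hypothetical stand-ins for the camera_metadata API, not part of
this HAL:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Stand-ins for camera_metadata_t and its entries (illustration only).
    struct Entry { uint32_t tag; std::vector<uint8_t> data; };
    struct Metadata { std::vector<Entry> entries; };

    // Pass 1 (sizing == true): only count entries and payload bytes.
    // Pass 2 (sizing == false): append into the pre-reserved container.
    static int addOrSize(Metadata* m, bool sizing, size_t* entryCount,
                         size_t* dataCount, uint32_t tag, const void* data,
                         size_t bytes) {
      if (sizing) {
        ++*entryCount;
        *dataCount += bytes;
        return 0;
      }
      const uint8_t* p = static_cast<const uint8_t*>(data);
      m->entries.push_back(Entry{tag, std::vector<uint8_t>(p, p + bytes)});
      return 0;
    }

    static int buildTemplate(Metadata* m, bool sizing, size_t* entries,
                             size_t* bytes) {
      static const uint8_t mode = 1;  // hypothetical tag payload
      if (addOrSize(m, sizing, entries, bytes, /*tag=*/0x1001u, &mode,
                    sizeof(mode)) != 0)
        return -1;
      // ...every further tag funnels through the same call...
      return 0;
    }

    int main() {
      size_t entries = 0, bytes = 0;
      buildTemplate(nullptr, /*sizing=*/true, &entries, &bytes);  // size it
      Metadata m;
      m.entries.reserve(entries);                                 // allocate
      buildTemplate(&m, /*sizing=*/false, &entries, &bytes);      // fill it
      return 0;
    }

Keeping both passes inside one function body is what makes the pattern safe:
any tag added to the fill pass is automatically counted by the sizing pass,
so the allocation size can never drift out of step with the contents.
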
diff --git a/guest/hals/camera/EmulatedFakeCamera2.h b/guest/hals/camera/EmulatedFakeCamera2.h
index c622d26..b55d012 100644
--- a/guest/hals/camera/EmulatedFakeCamera2.h
+++ b/guest/hals/camera/EmulatedFakeCamera2.h
@@ -25,14 +25,14 @@
 
 #include <vector>
 
-#include "EmulatedCamera2.h"
-#include "fake-pipeline2/Base.h"
-#include "fake-pipeline2/Sensor.h"
-#include "fake-pipeline2/JpegCompressor.h"
 #include <utils/Condition.h>
 #include <utils/KeyedVector.h>
-#include <utils/String8.h>
 #include <utils/String16.h>
+#include <utils/String8.h>
+#include "EmulatedCamera2.h"
+#include "fake-pipeline2/Base.h"
+#include "fake-pipeline2/JpegCompressor.h"
+#include "fake-pipeline2/Sensor.h"
 
 namespace android {
 
@@ -40,398 +40,382 @@
  * a simple simulation of a scene, sensor, and image processing pipeline.
  */
 class EmulatedFakeCamera2 : public EmulatedCamera2 {
-public:
-    /* Constructs EmulatedFakeCamera instance. */
-    EmulatedFakeCamera2(int cameraId, bool facingBack, struct hw_module_t* module);
+ public:
+  /* Constructs EmulatedFakeCamera2 instance. */
+  EmulatedFakeCamera2(int cameraId, bool facingBack,
+                      struct hw_module_t *module);
 
-    /* Destructs EmulatedFakeCamera instance. */
-    ~EmulatedFakeCamera2();
+  /* Destructs EmulatedFakeCamera2 instance. */
+  ~EmulatedFakeCamera2();
 
-    /****************************************************************************
-     * EmulatedCamera2 virtual overrides.
-     ***************************************************************************/
+  /****************************************************************************
+   * EmulatedCamera2 virtual overrides.
+   ***************************************************************************/
 
-public:
-    /* Initializes EmulatedFakeCamera2 instance. */
-    status_t Initialize(const cvd::CameraDefinition& props);
+ public:
+  /* Initializes EmulatedFakeCamera2 instance. */
+  status_t Initialize(const cvd::CameraDefinition &props);
 
-    /****************************************************************************
-     * Camera Module API and generic hardware device API implementation
-     ***************************************************************************/
-public:
+  /****************************************************************************
+   * Camera Module API and generic hardware device API implementation
+   ***************************************************************************/
+ public:
+  virtual status_t connectCamera(hw_device_t **device);
 
-    virtual status_t connectCamera(hw_device_t** device);
+  virtual status_t plugCamera();
+  virtual status_t unplugCamera();
+  virtual camera_device_status_t getHotplugStatus();
 
-    virtual status_t plugCamera();
-    virtual status_t unplugCamera();
-    virtual camera_device_status_t getHotplugStatus();
+  virtual status_t closeCamera();
 
-    virtual status_t closeCamera();
+  virtual status_t getCameraInfo(struct camera_info *info);
 
-    virtual status_t getCameraInfo(struct camera_info *info);
+  /****************************************************************************
+   * EmulatedCamera2 abstract API implementation.
+   ***************************************************************************/
+ protected:
+  /** Request input queue */
 
-    /****************************************************************************
-     * EmulatedCamera2 abstract API implementation.
-     ***************************************************************************/
-protected:
-    /** Request input queue */
+  virtual int requestQueueNotify();
 
-    virtual int requestQueueNotify();
+  /** Count of requests in flight */
+  virtual int getInProgressCount();
 
-    /** Count of requests in flight */
-    virtual int getInProgressCount();
+  /** Cancel all captures in flight */
+  // virtual int flushCapturesInProgress();
 
-    /** Cancel all captures in flight */
-    //virtual int flushCapturesInProgress();
+  /** Construct default request */
+  virtual int constructDefaultRequest(int request_template,
+                                      camera_metadata_t **request);
 
-    /** Construct default request */
-    virtual int constructDefaultRequest(
-            int request_template,
-            camera_metadata_t **request);
+  virtual int allocateStream(uint32_t width, uint32_t height, int format,
+                             const camera2_stream_ops_t *stream_ops,
+                             uint32_t *stream_id, uint32_t *format_actual,
+                             uint32_t *usage, uint32_t *max_buffers);
 
-    virtual int allocateStream(
-            uint32_t width,
-            uint32_t height,
-            int format,
-            const camera2_stream_ops_t *stream_ops,
-            uint32_t *stream_id,
-            uint32_t *format_actual,
-            uint32_t *usage,
-            uint32_t *max_buffers);
+  virtual int registerStreamBuffers(uint32_t stream_id, int num_buffers,
+                                    buffer_handle_t *buffers);
 
-    virtual int registerStreamBuffers(
-            uint32_t stream_id,
-            int num_buffers,
-            buffer_handle_t *buffers);
+  virtual int releaseStream(uint32_t stream_id);
 
-    virtual int releaseStream(uint32_t stream_id);
+  // virtual int allocateReprocessStream(
+  //         uint32_t width,
+  //         uint32_t height,
+  //         uint32_t format,
+  //         const camera2_stream_ops_t *stream_ops,
+  //         uint32_t *stream_id,
+  //         uint32_t *format_actual,
+  //         uint32_t *usage,
+  //         uint32_t *max_buffers);
 
-    // virtual int allocateReprocessStream(
-    //         uint32_t width,
-    //         uint32_t height,
-    //         uint32_t format,
-    //         const camera2_stream_ops_t *stream_ops,
-    //         uint32_t *stream_id,
-    //         uint32_t *format_actual,
-    //         uint32_t *usage,
-    //         uint32_t *max_buffers);
+  virtual int allocateReprocessStreamFromStream(
+      uint32_t output_stream_id, const camera2_stream_in_ops_t *stream_ops,
+      uint32_t *stream_id);
 
-    virtual int allocateReprocessStreamFromStream(
-            uint32_t output_stream_id,
-            const camera2_stream_in_ops_t *stream_ops,
-            uint32_t *stream_id);
+  virtual int releaseReprocessStream(uint32_t stream_id);
 
-    virtual int releaseReprocessStream(uint32_t stream_id);
+  virtual int triggerAction(uint32_t trigger_id, int32_t ext1, int32_t ext2);
 
-    virtual int triggerAction(uint32_t trigger_id,
-            int32_t ext1,
-            int32_t ext2);
+  /** Debug methods */
 
-    /** Debug methods */
+  virtual int dump(int fd);
 
-    virtual int dump(int fd);
+ public:
+  /****************************************************************************
+   * Utility methods called by configure/readout threads and pipeline
+   ***************************************************************************/
 
-public:
-    /****************************************************************************
-     * Utility methods called by configure/readout threads and pipeline
-     ***************************************************************************/
+  // Get information about a given stream. Will lock mMutex
+  const Stream &getStreamInfo(uint32_t streamId);
+  const ReprocessStream &getReprocessStreamInfo(uint32_t streamId);
 
-    // Get information about a given stream. Will lock mMutex
-    const Stream &getStreamInfo(uint32_t streamId);
-    const ReprocessStream &getReprocessStreamInfo(uint32_t streamId);
+  // Notifies rest of camera subsystem of serious error
+  void signalError();
 
-    // Notifies rest of camera subsystem of serious error
-    void signalError();
+ private:
+  /****************************************************************************
+   * Utility methods
+   ***************************************************************************/
+  /** Construct static camera metadata, two-pass */
+  status_t constructStaticInfo(camera_metadata_t **info,
+                               bool sizeRequest) const;
 
-private:
-    /****************************************************************************
-     * Utility methods
-     ***************************************************************************/
-    /** Construct static camera metadata, two-pass */
-    status_t constructStaticInfo(
-            camera_metadata_t **info,
-            bool sizeRequest) const;
+  /** Two-pass implementation of constructDefaultRequest */
+  status_t constructDefaultRequest(int request_template,
+                                   camera_metadata_t **request,
+                                   bool sizeRequest) const;
+  /** Helper function for constructDefaultRequest */
+  static status_t addOrSize(camera_metadata_t *request, bool sizeRequest,
+                            size_t *entryCount, size_t *dataCount, uint32_t tag,
+                            const void *entry_data, size_t entry_count);
 
-    /** Two-pass implementation of constructDefaultRequest */
-    status_t constructDefaultRequest(
-            int request_template,
-            camera_metadata_t **request,
-            bool sizeRequest) const;
-    /** Helper function for constructDefaultRequest */
-    static status_t addOrSize( camera_metadata_t *request,
-            bool sizeRequest,
-            size_t *entryCount,
-            size_t *dataCount,
-            uint32_t tag,
-            const void *entry_data,
-            size_t entry_count);
+  /** Determine if the stream id is listed in any currently-in-flight
+   * requests. Assumes mMutex is locked */
+  bool isStreamInUse(uint32_t streamId);
 
-    /** Determine if the stream id is listed in any currently-in-flight
-     * requests. Assumes mMutex is locked */
-    bool isStreamInUse(uint32_t streamId);
+  /** Determine if the reprocess stream id is listed in any
+   * currently-in-flight requests. Assumes mMutex is locked */
+  bool isReprocessStreamInUse(uint32_t streamId);
 
-    /** Determine if the reprocess stream id is listed in any
-     * currently-in-flight requests. Assumes mMutex is locked */
-    bool isReprocessStreamInUse(uint32_t streamId);
+  /****************************************************************************
+   * Pipeline controller threads
+   ***************************************************************************/
 
-    /****************************************************************************
-     * Pipeline controller threads
-     ***************************************************************************/
+  class ConfigureThread : public Thread {
+   public:
+    ConfigureThread(EmulatedFakeCamera2 *parent);
+    ~ConfigureThread();
 
-    class ConfigureThread: public Thread {
-      public:
-        ConfigureThread(EmulatedFakeCamera2 *parent);
-        ~ConfigureThread();
+    status_t waitUntilRunning();
+    status_t newRequestAvailable();
+    status_t readyToRun();
 
-        status_t waitUntilRunning();
-        status_t newRequestAvailable();
-        status_t readyToRun();
+    bool isStreamInUse(uint32_t id);
+    int getInProgressCount();
 
-        bool isStreamInUse(uint32_t id);
-        int getInProgressCount();
-      private:
-        EmulatedFakeCamera2 *mParent;
-        static const nsecs_t kWaitPerLoop = 10000000L; // 10 ms
+   private:
+    EmulatedFakeCamera2 *mParent;
+    static const nsecs_t kWaitPerLoop = 10000000L;  // 10 ms
 
-        bool mRunning;
-        bool threadLoop();
+    bool mRunning;
+    bool threadLoop();
 
-        bool setupCapture();
-        bool setupReprocess();
+    bool setupCapture();
+    bool setupReprocess();
 
-        bool configureNextCapture();
-        bool configureNextReprocess();
+    bool configureNextCapture();
+    bool configureNextReprocess();
 
-        bool getBuffers();
+    bool getBuffers();
 
-        Mutex mInputMutex; // Protects mActive, mRequestCount
-        Condition mInputSignal;
-        bool mActive; // Whether we're waiting for input requests or actively
-                      // working on them
-        size_t mRequestCount;
+    Mutex mInputMutex;  // Protects mActive, mRequestCount
+    Condition mInputSignal;
+    bool mActive;  // Whether we're waiting for input requests or actively
+                   // working on them
+    size_t mRequestCount;
 
-        camera_metadata_t *mRequest;
+    camera_metadata_t *mRequest;
 
-        Mutex mInternalsMutex; // Lock before accessing below members.
-        bool    mWaitingForReadout;
-        bool    mNextNeedsJpeg;
-        bool    mNextIsCapture;
-        int32_t mNextFrameNumber;
-        int64_t mNextExposureTime;
-        int64_t mNextFrameDuration;
-        int32_t mNextSensitivity;
-        Buffers *mNextBuffers;
-    };
+    Mutex mInternalsMutex;  // Lock before accessing below members.
+    bool mWaitingForReadout;
+    bool mNextNeedsJpeg;
+    bool mNextIsCapture;
+    int32_t mNextFrameNumber;
+    int64_t mNextExposureTime;
+    int64_t mNextFrameDuration;
+    int32_t mNextSensitivity;
+    Buffers *mNextBuffers;
+  };
 
-    class ReadoutThread: public Thread, private JpegCompressor::JpegListener {
-      public:
-        ReadoutThread(EmulatedFakeCamera2 *parent);
-        ~ReadoutThread();
+  class ReadoutThread : public Thread, private JpegCompressor::JpegListener {
+   public:
+    ReadoutThread(EmulatedFakeCamera2 *parent);
+    ~ReadoutThread();
 
-        status_t readyToRun();
+    status_t readyToRun();
 
-        // Input
-        status_t waitUntilRunning();
-        bool waitForReady(nsecs_t timeout);
-        void setNextOperation(bool isCapture,
-                camera_metadata_t *request,
-                Buffers *buffers);
-        bool isStreamInUse(uint32_t id);
-        int getInProgressCount();
-      private:
-        EmulatedFakeCamera2 *mParent;
+    // Input
+    status_t waitUntilRunning();
+    bool waitForReady(nsecs_t timeout);
+    void setNextOperation(bool isCapture, camera_metadata_t *request,
+                          Buffers *buffers);
+    bool isStreamInUse(uint32_t id);
+    int getInProgressCount();
+
+   private:
+    EmulatedFakeCamera2 *mParent;
+
+    bool mRunning;
+    bool threadLoop();
 
-        bool mRunning;
-        bool threadLoop();
+    bool readyForNextCapture();
+    status_t collectStatisticsMetadata(camera_metadata_t *frame);
 
-        bool readyForNextCapture();
-        status_t collectStatisticsMetadata(camera_metadata_t *frame);
+    // Inputs
+    Mutex mInputMutex;  // Protects mActive, mInFlightQueue, mRequestCount
+    Condition mInputSignal;
+    Condition mReadySignal;
 
-        // Inputs
-        Mutex mInputMutex; // Protects mActive, mInFlightQueue, mRequestCount
-        Condition mInputSignal;
-        Condition mReadySignal;
+    bool mActive;
 
-        bool mActive;
+    static const int kInFlightQueueSize = 4;
+    struct InFlightQueue {
+      bool isCapture;
+      camera_metadata_t *request;
+      Buffers *buffers;
+    } * mInFlightQueue;
 
-        static const int kInFlightQueueSize = 4;
-        struct InFlightQueue {
-            bool isCapture;
-            camera_metadata_t *request;
-            Buffers *buffers;
-        } *mInFlightQueue;
+    size_t mInFlightHead;
+    size_t mInFlightTail;
 
-        size_t mInFlightHead;
-        size_t mInFlightTail;
+    size_t mRequestCount;
 
-        size_t mRequestCount;
+    // Internals
+    Mutex mInternalsMutex;
 
-        // Internals
-        Mutex mInternalsMutex;
+    bool mIsCapture;
+    camera_metadata_t *mRequest;
+    Buffers *mBuffers;
 
-        bool mIsCapture;
-        camera_metadata_t *mRequest;
-        Buffers *mBuffers;
+    // Jpeg completion listeners
+    void onJpegDone(const StreamBuffer &jpegBuffer, bool success);
+    void onJpegInputDone(const StreamBuffer &inputBuffer);
+    nsecs_t mJpegTimestamp;
+  };
 
-        // Jpeg completion listeners
-        void onJpegDone(const StreamBuffer &jpegBuffer, bool success);
-        void onJpegInputDone(const StreamBuffer &inputBuffer);
-        nsecs_t mJpegTimestamp;
-    };
+  // 3A management thread (auto-exposure, focus, white balance)
+  class ControlThread : public Thread {
+   public:
+    ControlThread(EmulatedFakeCamera2 *parent);
+    ~ControlThread();
 
-    // 3A management thread (auto-exposure, focus, white balance)
-    class ControlThread: public Thread {
-      public:
-        ControlThread(EmulatedFakeCamera2 *parent);
-        ~ControlThread();
+    status_t readyToRun();
 
-        status_t readyToRun();
+    status_t waitUntilRunning();
 
-        status_t waitUntilRunning();
+    // Interpret request's control parameters and override
+    // capture settings as needed
+    status_t processRequest(camera_metadata_t *request);
 
-        // Interpret request's control parameters and override
-        // capture settings as needed
-        status_t processRequest(camera_metadata_t *request);
+    status_t triggerAction(uint32_t msgType, int32_t ext1, int32_t ext2);
 
-        status_t triggerAction(uint32_t msgType,
-                int32_t ext1, int32_t ext2);
-      private:
-        ControlThread(const ControlThread &t);
-        ControlThread& operator=(const ControlThread &t);
+   private:
+    ControlThread(const ControlThread &t);
+    ControlThread &operator=(const ControlThread &t);
 
-        // Constants controlling fake 3A behavior
-        static const nsecs_t kControlCycleDelay;
-        static const nsecs_t kMinAfDuration;
-        static const nsecs_t kMaxAfDuration;
-        static const float kAfSuccessRate;
-        static const float kContinuousAfStartRate;
+    // Constants controlling fake 3A behavior
+    static const nsecs_t kControlCycleDelay;
+    static const nsecs_t kMinAfDuration;
+    static const nsecs_t kMaxAfDuration;
+    static const float kAfSuccessRate;
+    static const float kContinuousAfStartRate;
 
-        static const float kAeScanStartRate;
-        static const nsecs_t kMinAeDuration;
-        static const nsecs_t kMaxAeDuration;
-        static const nsecs_t kMinPrecaptureAeDuration;
-        static const nsecs_t kMaxPrecaptureAeDuration;
+    static const float kAeScanStartRate;
+    static const nsecs_t kMinAeDuration;
+    static const nsecs_t kMaxAeDuration;
+    static const nsecs_t kMinPrecaptureAeDuration;
+    static const nsecs_t kMaxPrecaptureAeDuration;
 
-        static const nsecs_t kNormalExposureTime;
-        static const nsecs_t kExposureJump;
-        static const nsecs_t kMinExposureTime;
+    static const nsecs_t kNormalExposureTime;
+    static const nsecs_t kExposureJump;
+    static const nsecs_t kMinExposureTime;
 
-        EmulatedFakeCamera2 *mParent;
+    EmulatedFakeCamera2 *mParent;
 
-        bool mRunning;
-        bool threadLoop();
+    bool mRunning;
+    bool threadLoop();
 
-        Mutex mInputMutex; // Protects input methods
-        Condition mInputSignal;
+    Mutex mInputMutex;  // Protects input methods
+    Condition mInputSignal;
 
-        // Trigger notifications
-        bool mStartAf;
-        bool mCancelAf;
-        bool mStartPrecapture;
+    // Trigger notifications
+    bool mStartAf;
+    bool mCancelAf;
+    bool mStartPrecapture;
 
-        // Latest state for 3A request fields
-        uint8_t mControlMode;
+    // Latest state for 3A request fields
+    uint8_t mControlMode;
 
-        uint8_t mEffectMode;
-        uint8_t mSceneMode;
+    uint8_t mEffectMode;
+    uint8_t mSceneMode;
 
-        uint8_t mAfMode;
-        bool mAfModeChange;
+    uint8_t mAfMode;
+    bool mAfModeChange;
 
-        uint8_t mAwbMode;
-        uint8_t mAeMode;
+    uint8_t mAwbMode;
+    uint8_t mAeMode;
 
-        // Latest trigger IDs
-        int32_t mAfTriggerId;
-        int32_t mPrecaptureTriggerId;
+    // Latest trigger IDs
+    int32_t mAfTriggerId;
+    int32_t mPrecaptureTriggerId;
 
-        // Current state for 3A algorithms
-        uint8_t mAfState;
-        uint8_t mAeState;
-        uint8_t mAwbState;
-        bool    mAeLock;
+    // Current state for 3A algorithms
+    uint8_t mAfState;
+    uint8_t mAeState;
+    uint8_t mAwbState;
+    bool mAeLock;
 
-        // Current control parameters
-        nsecs_t mExposureTime;
+    // Current control parameters
+    nsecs_t mExposureTime;
 
-        // Private to threadLoop and its utility methods
+    // Private to threadLoop and its utility methods
 
-        nsecs_t mAfScanDuration;
-        nsecs_t mAeScanDuration;
-        bool mLockAfterPassiveScan;
+    nsecs_t mAfScanDuration;
+    nsecs_t mAeScanDuration;
+    bool mLockAfterPassiveScan;
 
-        // Utility methods for AF
-        int processAfTrigger(uint8_t afMode, uint8_t afState);
-        int maybeStartAfScan(uint8_t afMode, uint8_t afState);
-        int updateAfScan(uint8_t afMode, uint8_t afState, nsecs_t *maxSleep);
-        void updateAfState(uint8_t newState, int32_t triggerId);
+    // Utility methods for AF
+    int processAfTrigger(uint8_t afMode, uint8_t afState);
+    int maybeStartAfScan(uint8_t afMode, uint8_t afState);
+    int updateAfScan(uint8_t afMode, uint8_t afState, nsecs_t *maxSleep);
+    void updateAfState(uint8_t newState, int32_t triggerId);
 
-        // Utility methods for precapture trigger
-        int processPrecaptureTrigger(uint8_t aeMode, uint8_t aeState);
-        int maybeStartAeScan(uint8_t aeMode, bool aeLock, uint8_t aeState);
-        int updateAeScan(uint8_t aeMode, bool aeLock, uint8_t aeState,
-                nsecs_t *maxSleep);
-        void updateAeState(uint8_t newState, int32_t triggerId);
-    };
+    // Utility methods for precapture trigger
+    int processPrecaptureTrigger(uint8_t aeMode, uint8_t aeState);
+    int maybeStartAeScan(uint8_t aeMode, bool aeLock, uint8_t aeState);
+    int updateAeScan(uint8_t aeMode, bool aeLock, uint8_t aeState,
+                     nsecs_t *maxSleep);
+    void updateAeState(uint8_t newState, int32_t triggerId);
+  };
 
-    /****************************************************************************
-     * Static configuration information
-     ***************************************************************************/
-private:
-    static const uint32_t kMaxRawStreamCount = 1;
-    static const uint32_t kMaxProcessedStreamCount = 3;
-    static const uint32_t kMaxJpegStreamCount = 1;
-    static const uint32_t kMaxReprocessStreamCount = 2;
-    static const uint32_t kMaxBufferCount = 4;
-    static const uint32_t kAvailableFormats[];
-    static const uint32_t kAvailableRawSizes[];
-    static const uint64_t kAvailableRawMinDurations[];
-    static const uint32_t kAvailableProcessedSizesBack[];
-    static const uint32_t kAvailableProcessedSizesFront[];
-    static const uint64_t kAvailableProcessedMinDurations[];
-    static const uint32_t kAvailableJpegSizesBack[];
-    static const uint32_t kAvailableJpegSizesFront[];
-    static const uint64_t kAvailableJpegMinDurations[];
+  /****************************************************************************
+   * Static configuration information
+   ***************************************************************************/
+ private:
+  static const uint32_t kMaxRawStreamCount = 1;
+  static const uint32_t kMaxProcessedStreamCount = 3;
+  static const uint32_t kMaxJpegStreamCount = 1;
+  static const uint32_t kMaxReprocessStreamCount = 2;
+  static const uint32_t kMaxBufferCount = 4;
+  static const uint32_t kAvailableFormats[];
+  static const uint32_t kAvailableRawSizes[];
+  static const uint64_t kAvailableRawMinDurations[];
+  static const uint32_t kAvailableProcessedSizesBack[];
+  static const uint32_t kAvailableProcessedSizesFront[];
+  static const uint64_t kAvailableProcessedMinDurations[];
+  static const uint32_t kAvailableJpegSizesBack[];
+  static const uint32_t kAvailableJpegSizesFront[];
+  static const uint64_t kAvailableJpegMinDurations[];
 
-    /****************************************************************************
-     * Data members.
-     ***************************************************************************/
+  /****************************************************************************
+   * Data members.
+   ***************************************************************************/
 
-protected:
-    /* Facing back (true) or front (false) switch. */
-    bool mFacingBack;
+ protected:
+  /* Facing back (true) or front (false) switch. */
+  bool mFacingBack;
 
-private:
-    bool mIsConnected;
+ private:
+  bool mIsConnected;
 
-    int32_t mSensorWidth, mSensorHeight;
+  int32_t mSensorWidth, mSensorHeight;
 
-    /** Stream manipulation */
-    uint32_t mNextStreamId;
-    uint32_t mRawStreamCount;
-    uint32_t mProcessedStreamCount;
-    uint32_t mJpegStreamCount;
+  /** Stream manipulation */
+  uint32_t mNextStreamId;
+  uint32_t mRawStreamCount;
+  uint32_t mProcessedStreamCount;
+  uint32_t mJpegStreamCount;
 
-    std::vector<uint32_t> mAvailableRawSizes;
-    std::vector<uint32_t> mAvailableProcessedSizes;
-    std::vector<uint32_t> mAvailableJpegSizes;
+  std::vector<uint32_t> mAvailableRawSizes;
+  std::vector<uint32_t> mAvailableProcessedSizes;
+  std::vector<uint32_t> mAvailableJpegSizes;
 
-    uint32_t mNextReprocessStreamId;
-    uint32_t mReprocessStreamCount;
+  uint32_t mNextReprocessStreamId;
+  uint32_t mReprocessStreamCount;
 
-    KeyedVector<uint32_t, Stream> mStreams;
-    KeyedVector<uint32_t, ReprocessStream> mReprocessStreams;
+  KeyedVector<uint32_t, Stream> mStreams;
+  KeyedVector<uint32_t, ReprocessStream> mReprocessStreams;
 
-    /** Simulated hardware interfaces */
-    sp<Sensor> mSensor;
-    sp<JpegCompressor> mJpegCompressor;
+  /** Simulated hardware interfaces */
+  sp<Sensor> mSensor;
+  sp<JpegCompressor> mJpegCompressor;
 
-    /** Pipeline control threads */
-    sp<ConfigureThread> mConfigureThread;
-    sp<ReadoutThread>   mReadoutThread;
-    sp<ControlThread>   mControlThread;
+  /** Pipeline control threads */
+  sp<ConfigureThread> mConfigureThread;
+  sp<ReadoutThread> mReadoutThread;
+  sp<ControlThread> mControlThread;
 };
 
 }; /* namespace android */
 
-#endif  /* HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA2_H */
+#endif /* HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA2_H */
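
Note on the header above: requests flow from ConfigureThread into
ReadoutThread through mInFlightQueue, a fixed array of kInFlightQueueSize
slots indexed by mInFlightHead/mInFlightTail and guarded by mInputMutex with
mInputSignal/mReadySignal for wakeups. Below is a minimal sketch of that kind
of bounded ring, assuming std::mutex and std::condition_variable in place of
android::Mutex and Condition, and a simplified payload; it illustrates the
shape of the structure, not the HAL's exact blocking behavior:

    #include <array>
    #include <condition_variable>
    #include <cstddef>
    #include <mutex>

    // Hypothetical payload standing in for {isCapture, request, buffers}.
    struct InFlight {
      bool isCapture;
      int frameNumber;
    };

    // Bounded ring with head/tail indices, mirroring the shape of
    // ReadoutThread::mInFlightQueue (illustration, not the HAL code).
    class InFlightRing {
     public:
      static constexpr size_t kCapacity = 4;  // cf. kInFlightQueueSize

      void push(const InFlight& item) {  // producer: configure side
        std::unique_lock<std::mutex> lock(mMutex);
        mNotFull.wait(lock, [this] { return mCount < kCapacity; });
        mSlots[mTail] = item;
        mTail = (mTail + 1) % kCapacity;
        ++mCount;
        mNotEmpty.notify_one();
      }

      InFlight pop() {  // consumer: readout side
        std::unique_lock<std::mutex> lock(mMutex);
        mNotEmpty.wait(lock, [this] { return mCount > 0; });
        InFlight item = mSlots[mHead];
        mHead = (mHead + 1) % kCapacity;
        --mCount;
        mNotFull.notify_one();
        return item;
      }

     private:
      std::mutex mMutex;  // guards all members below
      std::condition_variable mNotEmpty, mNotFull;
      std::array<InFlight, kCapacity> mSlots{};
      size_t mHead = 0, mTail = 0, mCount = 0;
    };

A fixed capacity of a few slots is a deliberate backpressure mechanism: the
configure thread stalls rather than letting an unbounded number of captures
pile up ahead of readout.
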
diff --git a/guest/hals/camera/EmulatedFakeCamera3.cpp b/guest/hals/camera/EmulatedFakeCamera3.cpp
index dcfc34c..def8a88 100644
--- a/guest/hals/camera/EmulatedFakeCamera3.cpp
+++ b/guest/hals/camera/EmulatedFakeCamera3.cpp
@@ -27,14 +27,14 @@
 #include <cutils/properties.h>
 #include <utils/Log.h>
 
-#include "EmulatedFakeCamera3.h"
-#include "EmulatedCameraFactory.h"
 #include <ui/Fence.h>
+#include "EmulatedCameraFactory.h"
+#include "EmulatedFakeCamera3.h"
 #include "GrallocModule.h"
 
-#include "fake-pipeline2/Sensor.h"
-#include "fake-pipeline2/JpegCompressor.h"
 #include <cmath>
+#include "fake-pipeline2/JpegCompressor.h"
+#include "fake-pipeline2/Sensor.h"
 
 #include <vector>
 
@@ -55,178 +55,173 @@
 const int64_t SEC = MSEC * 1000LL;
 
 const int32_t EmulatedFakeCamera3::kAvailableFormats[] = {
-        HAL_PIXEL_FORMAT_RAW16,
-        HAL_PIXEL_FORMAT_BLOB,
-        HAL_PIXEL_FORMAT_RGBA_8888,
-        HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
-        // These are handled by YCbCr_420_888
-        //        HAL_PIXEL_FORMAT_YV12,
-        //        HAL_PIXEL_FORMAT_YCrCb_420_SP,
-        HAL_PIXEL_FORMAT_YCbCr_420_888,
-        HAL_PIXEL_FORMAT_Y16
-};
+    HAL_PIXEL_FORMAT_RAW16, HAL_PIXEL_FORMAT_BLOB, HAL_PIXEL_FORMAT_RGBA_8888,
+    HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+    // These are handled by YCbCr_420_888
+    //        HAL_PIXEL_FORMAT_YV12,
+    //        HAL_PIXEL_FORMAT_YCrCb_420_SP,
+    HAL_PIXEL_FORMAT_YCbCr_420_888, HAL_PIXEL_FORMAT_Y16};
 
 /**
  * 3A constants
  */
 
 // Default exposure and gain targets for different scenarios
-const nsecs_t EmulatedFakeCamera3::kNormalExposureTime       = 10 * MSEC;
+const nsecs_t EmulatedFakeCamera3::kNormalExposureTime = 10 * MSEC;
 const nsecs_t EmulatedFakeCamera3::kFacePriorityExposureTime = 30 * MSEC;
-const int     EmulatedFakeCamera3::kNormalSensitivity        = 100;
-const int     EmulatedFakeCamera3::kFacePrioritySensitivity  = 400;
-const float   EmulatedFakeCamera3::kExposureTrackRate        = 0.1;
-const int     EmulatedFakeCamera3::kPrecaptureMinFrames      = 10;
-const int     EmulatedFakeCamera3::kStableAeMaxFrames        = 100;
-const float   EmulatedFakeCamera3::kExposureWanderMin        = -2;
-const float   EmulatedFakeCamera3::kExposureWanderMax        = 1;
+const int EmulatedFakeCamera3::kNormalSensitivity = 100;
+const int EmulatedFakeCamera3::kFacePrioritySensitivity = 400;
+const float EmulatedFakeCamera3::kExposureTrackRate = 0.1;
+const int EmulatedFakeCamera3::kPrecaptureMinFrames = 10;
+const int EmulatedFakeCamera3::kStableAeMaxFrames = 100;
+const float EmulatedFakeCamera3::kExposureWanderMin = -2;
+const float EmulatedFakeCamera3::kExposureWanderMax = 1;
 
 /**
  * Camera device lifecycle methods
  */
 
 EmulatedFakeCamera3::EmulatedFakeCamera3(int cameraId, bool facingBack,
-        struct hw_module_t* module) :
-        EmulatedCamera3(cameraId, module),
-        mFacingBack(facingBack) {
-    ALOGI("Constructing emulated fake camera 3: ID %d, facing %s",
-            mCameraID, facingBack ? "back" : "front");
+                                         struct hw_module_t *module)
+    : EmulatedCamera3(cameraId, module), mFacingBack(facingBack) {
+  ALOGI("Constructing emulated fake camera 3: ID %d, facing %s", mCameraID,
+        facingBack ? "back" : "front");
 
-    for (size_t i = 0; i < CAMERA3_TEMPLATE_COUNT; i++) {
-        mDefaultTemplates[i] = NULL;
-    }
+  for (size_t i = 0; i < CAMERA3_TEMPLATE_COUNT; i++) {
+    mDefaultTemplates[i] = NULL;
+  }
 }
 
 EmulatedFakeCamera3::~EmulatedFakeCamera3() {
-    for (size_t i = 0; i < CAMERA3_TEMPLATE_COUNT; i++) {
-        if (mDefaultTemplates[i] != NULL) {
-            free_camera_metadata(mDefaultTemplates[i]);
-        }
+  for (size_t i = 0; i < CAMERA3_TEMPLATE_COUNT; i++) {
+    if (mDefaultTemplates[i] != NULL) {
+      free_camera_metadata(mDefaultTemplates[i]);
     }
+  }
 }
 
-status_t EmulatedFakeCamera3::Initialize(const cvd::CameraDefinition& params) {
-    ALOGV("%s: E", __FUNCTION__);
-    status_t res;
+status_t EmulatedFakeCamera3::Initialize(const cvd::CameraDefinition &params) {
+  ALOGV("%s: E", __FUNCTION__);
+  status_t res;
 
-    if (mStatus != STATUS_ERROR) {
-        ALOGE("%s: Already initialized!", __FUNCTION__);
-        return INVALID_OPERATION;
-    }
+  if (mStatus != STATUS_ERROR) {
+    ALOGE("%s: Already initialized!", __FUNCTION__);
+    return INVALID_OPERATION;
+  }
 
-    res = getCameraCapabilities();
-    if (res != OK) {
-        ALOGE("%s: Unable to get camera capabilities: %s (%d)",
-                __FUNCTION__, strerror(-res), res);
-        return res;
-    }
+  res = getCameraCapabilities();
+  if (res != OK) {
+    ALOGE("%s: Unable to get camera capabilities: %s (%d)", __FUNCTION__,
+          strerror(-res), res);
+    return res;
+  }
 
-    res = constructStaticInfo(params);
-    if (res != OK) {
-        ALOGE("%s: Unable to allocate static info: %s (%d)",
-                __FUNCTION__, strerror(-res), res);
-        return res;
-    }
+  res = constructStaticInfo(params);
+  if (res != OK) {
+    ALOGE("%s: Unable to allocate static info: %s (%d)", __FUNCTION__,
+          strerror(-res), res);
+    return res;
+  }
 
-    return EmulatedCamera3::Initialize(params);
+  return EmulatedCamera3::Initialize(params);
 }
 
-status_t EmulatedFakeCamera3::connectCamera(hw_device_t** device) {
-    ALOGV("%s: E", __FUNCTION__);
-    Mutex::Autolock l(mLock);
-    status_t res;
+status_t EmulatedFakeCamera3::connectCamera(hw_device_t **device) {
+  ALOGV("%s: E", __FUNCTION__);
+  Mutex::Autolock l(mLock);
+  status_t res;
 
-    if (mStatus != STATUS_CLOSED) {
-        ALOGE("%s: Can't connect in state %d", __FUNCTION__, mStatus);
-        return INVALID_OPERATION;
-    }
+  if (mStatus != STATUS_CLOSED) {
+    ALOGE("%s: Can't connect in state %d", __FUNCTION__, mStatus);
+    return INVALID_OPERATION;
+  }
 
-    mSensor = new Sensor(mSensorWidth, mSensorHeight);
-    mSensor->setSensorListener(this);
+  mSensor = new Sensor(mSensorWidth, mSensorHeight);
+  mSensor->setSensorListener(this);
 
-    res = mSensor->startUp();
-    if (res != NO_ERROR) return res;
+  res = mSensor->startUp();
+  if (res != NO_ERROR) return res;
 
-    mReadoutThread = new ReadoutThread(this);
-    mJpegCompressor = new JpegCompressor();
+  mReadoutThread = new ReadoutThread(this);
+  mJpegCompressor = new JpegCompressor();
 
-    res = mReadoutThread->run("EmuCam3::readoutThread");
-    if (res != NO_ERROR) return res;
+  res = mReadoutThread->run("EmuCam3::readoutThread");
+  if (res != NO_ERROR) return res;
 
-    // Initialize fake 3A
+  // Initialize fake 3A
 
-    mControlMode  = ANDROID_CONTROL_MODE_AUTO;
-    mFacePriority = false;
-    mAeMode       = ANDROID_CONTROL_AE_MODE_ON;
-    mAfMode       = ANDROID_CONTROL_AF_MODE_AUTO;
-    mAwbMode      = ANDROID_CONTROL_AWB_MODE_AUTO;
-    mAeState      = ANDROID_CONTROL_AE_STATE_INACTIVE;
-    mAfState      = ANDROID_CONTROL_AF_STATE_INACTIVE;
-    mAwbState     = ANDROID_CONTROL_AWB_STATE_INACTIVE;
-    mAeCounter    = 0;
-    mAeTargetExposureTime = kNormalExposureTime;
-    mAeCurrentExposureTime = kNormalExposureTime;
-    mAeCurrentSensitivity  = kNormalSensitivity;
+  mControlMode = ANDROID_CONTROL_MODE_AUTO;
+  mFacePriority = false;
+  mAeMode = ANDROID_CONTROL_AE_MODE_ON;
+  mAfMode = ANDROID_CONTROL_AF_MODE_AUTO;
+  mAwbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
+  mAeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
+  mAfState = ANDROID_CONTROL_AF_STATE_INACTIVE;
+  mAwbState = ANDROID_CONTROL_AWB_STATE_INACTIVE;
+  mAeCounter = 0;
+  mAeTargetExposureTime = kNormalExposureTime;
+  mAeCurrentExposureTime = kNormalExposureTime;
+  mAeCurrentSensitivity = kNormalSensitivity;
 
-    return EmulatedCamera3::connectCamera(device);
+  return EmulatedCamera3::connectCamera(device);
 }
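connectCamera() brings up the sensor, readout thread, and JPEG compressor, then seeds the fake 3A machine with AUTO modes and INACTIVE states. As a sketch only, those defaults could be grouped in one place; Fake3AState and makeDefault3AState are illustrative names, while the ANDROID_CONTROL_* values are the same metadata enums used above:

#include <cstdint>
#include <system/camera_metadata_tags.h>  // ANDROID_CONTROL_* enums

struct Fake3AState {
  uint8_t controlMode, aeMode, afMode, awbMode;
  uint8_t aeState, afState, awbState;
  int aeCounter;
};

static Fake3AState makeDefault3AState() {
  Fake3AState s = {};
  s.controlMode = ANDROID_CONTROL_MODE_AUTO;
  s.aeMode = ANDROID_CONTROL_AE_MODE_ON;
  s.afMode = ANDROID_CONTROL_AF_MODE_AUTO;
  s.awbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
  s.aeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
  s.afState = ANDROID_CONTROL_AF_STATE_INACTIVE;
  s.awbState = ANDROID_CONTROL_AWB_STATE_INACTIVE;
  s.aeCounter = 0;  // reset on every connect, as above
  return s;
}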
 
 status_t EmulatedFakeCamera3::closeCamera() {
-    ALOGV("%s: E", __FUNCTION__);
-    status_t res;
-    {
-        Mutex::Autolock l(mLock);
-        if (mStatus == STATUS_CLOSED) return OK;
+  ALOGV("%s: E", __FUNCTION__);
+  status_t res;
+  {
+    Mutex::Autolock l(mLock);
+    if (mStatus == STATUS_CLOSED) return OK;
 
-        res = mSensor->shutDown();
-        if (res != NO_ERROR) {
-            ALOGE("%s: Unable to shut down sensor: %d", __FUNCTION__, res);
-            return res;
-        }
-        mSensor.clear();
-
-        mReadoutThread->requestExit();
+    res = mSensor->shutDown();
+    if (res != NO_ERROR) {
+      ALOGE("%s: Unable to shut down sensor: %d", __FUNCTION__, res);
+      return res;
     }
+    mSensor.clear();
 
-    mReadoutThread->join();
+    mReadoutThread->requestExit();
+  }
 
-    {
-        Mutex::Autolock l(mLock);
-        // Clear out private stream information
-        for (StreamIterator s = mStreams.begin(); s != mStreams.end(); s++) {
-            PrivateStreamInfo *privStream =
-                    static_cast<PrivateStreamInfo*>((*s)->priv);
-            delete privStream;
-            (*s)->priv = NULL;
-        }
-        mStreams.clear();
-        mReadoutThread.clear();
+  mReadoutThread->join();
+
+  {
+    Mutex::Autolock l(mLock);
+    // Clear out private stream information
+    for (StreamIterator s = mStreams.begin(); s != mStreams.end(); s++) {
+      PrivateStreamInfo *privStream =
+          static_cast<PrivateStreamInfo *>((*s)->priv);
+      delete privStream;
+      (*s)->priv = NULL;
     }
+    mStreams.clear();
+    mReadoutThread.clear();
+  }
 
-    return EmulatedCamera3::closeCamera();
+  return EmulatedCamera3::closeCamera();
 }
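Note the shape of closeCamera(): signal the readout thread under mLock, drop the lock, join, then re-take the lock for the final stream cleanup. Joining a thread that itself acquires mLock while still holding that lock would deadlock. A generic sketch of the pattern, using standard-library types rather than the HAL's own Thread and Mutex classes:

#include <atomic>
#include <mutex>
#include <thread>

struct Worker {
  std::mutex lock;                      // also taken inside the worker loop
  std::atomic<bool> exitRequested{false};
  std::thread thread;

  void shutdown() {
    {
      std::lock_guard<std::mutex> l(lock);
      exitRequested = true;             // signal the worker under the lock
    }                                   // drop the lock the worker may need
    if (thread.joinable()) thread.join();
    std::lock_guard<std::mutex> l(lock);
    // final teardown now that the worker is gone
  }
};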
 
 status_t EmulatedFakeCamera3::getCameraInfo(struct camera_info *info) {
-    info->facing = mFacingBack ? CAMERA_FACING_BACK : CAMERA_FACING_FRONT;
-    info->orientation = EmulatedCameraFactory::Instance().getFakeCameraOrientation();
+  info->facing = mFacingBack ? CAMERA_FACING_BACK : CAMERA_FACING_FRONT;
+  info->orientation =
+      EmulatedCameraFactory::Instance().getFakeCameraOrientation();
 #if VSOC_PLATFORM_SDK_AFTER(L_MR1)
-    info->resource_cost = 100;
-    info->conflicting_devices = NULL;
-    info->conflicting_devices_length = 0;
+  info->resource_cost = 100;
+  info->conflicting_devices = NULL;
+  info->conflicting_devices_length = 0;
 #endif
-    return EmulatedCamera3::getCameraInfo(info);
+  return EmulatedCamera3::getCameraInfo(info);
 }
 
 status_t EmulatedFakeCamera3::setTorchMode(bool enabled) {
-    if (!mFacingBack) {
-        ALOGE("%s: Front camera does not have flash unit", __FUNCTION__);
-        return INVALID_OPERATION;
-    }
-    EmulatedCameraFactory::Instance().onTorchModeStatusChanged(
-        mCameraID, enabled ?
-        TORCH_MODE_STATUS_AVAILABLE_ON :
-        TORCH_MODE_STATUS_AVAILABLE_OFF);
-    return NO_ERROR;
+  if (!mFacingBack) {
+    ALOGE("%s: Front camera does not have flash unit", __FUNCTION__);
+    return INVALID_OPERATION;
+  }
+  EmulatedCameraFactory::Instance().onTorchModeStatusChanged(
+      mCameraID, enabled ? TORCH_MODE_STATUS_AVAILABLE_ON
+                         : TORCH_MODE_STATUS_AVAILABLE_OFF);
+  return NO_ERROR;
 }
 
 /**
@@ -234,2392 +229,2437 @@
  */
 
 status_t EmulatedFakeCamera3::configureStreams(
-        camera3_stream_configuration *streamList) {
-    Mutex::Autolock l(mLock);
-    ALOGV("%s: %d streams", __FUNCTION__, streamList->num_streams);
+    camera3_stream_configuration *streamList) {
+  Mutex::Autolock l(mLock);
+  ALOGV("%s: %d streams", __FUNCTION__, streamList->num_streams);
 
-    if (mStatus != STATUS_OPEN && mStatus != STATUS_READY) {
-        ALOGE("%s: Cannot configure streams in state %d",
-                __FUNCTION__, mStatus);
-        return NO_INIT;
+  if (mStatus != STATUS_OPEN && mStatus != STATUS_READY) {
+    ALOGE("%s: Cannot configure streams in state %d", __FUNCTION__, mStatus);
+    return NO_INIT;
+  }
+
+  /**
+   * Sanity-check input list.
+   */
+  if (streamList == NULL) {
+    ALOGE("%s: NULL stream configuration", __FUNCTION__);
+    return BAD_VALUE;
+  }
+
+  if (streamList->streams == NULL) {
+    ALOGE("%s: NULL stream list", __FUNCTION__);
+    return BAD_VALUE;
+  }
+
+  if (streamList->num_streams < 1) {
+    ALOGE("%s: Bad number of streams requested: %d", __FUNCTION__,
+          streamList->num_streams);
+    return BAD_VALUE;
+  }
+
+  camera3_stream_t *inputStream = NULL;
+  for (size_t i = 0; i < streamList->num_streams; i++) {
+    camera3_stream_t *newStream = streamList->streams[i];
+
+    if (newStream == NULL) {
+      ALOGE("%s: Stream index %zu was NULL", __FUNCTION__, i);
+      return BAD_VALUE;
     }
 
-    /**
-     * Sanity-check input list.
-     */
-    if (streamList == NULL) {
-        ALOGE("%s: NULL stream configuration", __FUNCTION__);
+    ALOGV("%s: Stream %p (id %zu), type %d, usage 0x%x, format 0x%x",
+          __FUNCTION__, newStream, i, newStream->stream_type, newStream->usage,
+          newStream->format);
+
+    if (newStream->stream_type == CAMERA3_STREAM_INPUT ||
+        newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL) {
+      if (inputStream != NULL) {
+        ALOGE("%s: Multiple input streams requested!", __FUNCTION__);
         return BAD_VALUE;
+      }
+      inputStream = newStream;
     }
 
-    if (streamList->streams == NULL) {
-        ALOGE("%s: NULL stream list", __FUNCTION__);
-        return BAD_VALUE;
+    bool validFormat = false;
+    for (size_t f = 0;
+         f < sizeof(kAvailableFormats) / sizeof(kAvailableFormats[0]); f++) {
+      if (newStream->format == kAvailableFormats[f]) {
+        validFormat = true;
+        break;
+      }
     }
-
-    if (streamList->num_streams < 1) {
-        ALOGE("%s: Bad number of streams requested: %d", __FUNCTION__,
-                streamList->num_streams);
-        return BAD_VALUE;
+    if (!validFormat) {
+      ALOGE("%s: Unsupported stream format 0x%x requested", __FUNCTION__,
+            newStream->format);
+      return BAD_VALUE;
     }
+  }
+  mInputStream = inputStream;
 
-    camera3_stream_t *inputStream = NULL;
-    for (size_t i = 0; i < streamList->num_streams; i++) {
-        camera3_stream_t *newStream = streamList->streams[i];
+  /**
+   * Initially mark all existing streams as not alive
+   */
+  for (StreamIterator s = mStreams.begin(); s != mStreams.end(); ++s) {
+    PrivateStreamInfo *privStream =
+        static_cast<PrivateStreamInfo *>((*s)->priv);
+    privStream->alive = false;
+  }
 
-        if (newStream == NULL) {
-            ALOGE("%s: Stream index %zu was NULL",
-                  __FUNCTION__, i);
-            return BAD_VALUE;
-        }
+  /**
+   * Find new streams and mark still-alive ones
+   */
+  for (size_t i = 0; i < streamList->num_streams; i++) {
+    camera3_stream_t *newStream = streamList->streams[i];
+    if (newStream->priv == NULL) {
+      // New stream, construct info
+      PrivateStreamInfo *privStream = new PrivateStreamInfo();
+      privStream->alive = true;
 
-        ALOGV("%s: Stream %p (id %zu), type %d, usage 0x%x, format 0x%x",
-                __FUNCTION__, newStream, i, newStream->stream_type,
-                newStream->usage,
-                newStream->format);
-
-        if (newStream->stream_type == CAMERA3_STREAM_INPUT ||
-            newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL) {
-            if (inputStream != NULL) {
-
-                ALOGE("%s: Multiple input streams requested!", __FUNCTION__);
-                return BAD_VALUE;
-            }
-            inputStream = newStream;
-        }
-
-        bool validFormat = false;
-        for (size_t f = 0;
-             f < sizeof(kAvailableFormats)/sizeof(kAvailableFormats[0]);
-             f++) {
-            if (newStream->format == kAvailableFormats[f]) {
-                validFormat = true;
-                break;
-            }
-        }
-        if (!validFormat) {
-            ALOGE("%s: Unsupported stream format 0x%x requested",
-                    __FUNCTION__, newStream->format);
-            return BAD_VALUE;
-        }
+      newStream->max_buffers = kMaxBufferCount;
+      newStream->priv = privStream;
+      mStreams.push_back(newStream);
+    } else {
+      // Existing stream, mark as still alive.
+      PrivateStreamInfo *privStream =
+          static_cast<PrivateStreamInfo *>(newStream->priv);
+      privStream->alive = true;
     }
-    mInputStream = inputStream;
-
-    /**
-     * Initially mark all existing streams as not alive
-     */
-    for (StreamIterator s = mStreams.begin(); s != mStreams.end(); ++s) {
-        PrivateStreamInfo *privStream =
-                static_cast<PrivateStreamInfo*>((*s)->priv);
-        privStream->alive = false;
+    // Always update usage and max buffers
+    newStream->max_buffers = kMaxBufferCount;
+    switch (newStream->stream_type) {
+      case CAMERA3_STREAM_OUTPUT:
+        newStream->usage = GRALLOC_USAGE_HW_CAMERA_WRITE;
+        break;
+      case CAMERA3_STREAM_INPUT:
+        newStream->usage = GRALLOC_USAGE_HW_CAMERA_READ;
+        break;
+      case CAMERA3_STREAM_BIDIRECTIONAL:
+        newStream->usage =
+            GRALLOC_USAGE_HW_CAMERA_READ | GRALLOC_USAGE_HW_CAMERA_WRITE;
+        break;
     }
+  }
 
-    /**
-     * Find new streams and mark still-alive ones
-     */
-    for (size_t i = 0; i < streamList->num_streams; i++) {
-        camera3_stream_t *newStream = streamList->streams[i];
-        if (newStream->priv == NULL) {
-            // New stream, construct info
-            PrivateStreamInfo *privStream = new PrivateStreamInfo();
-            privStream->alive = true;
-
-            newStream->max_buffers = kMaxBufferCount;
-            newStream->priv = privStream;
-            mStreams.push_back(newStream);
-        } else {
-            // Existing stream, mark as still alive.
-            PrivateStreamInfo *privStream =
-                    static_cast<PrivateStreamInfo*>(newStream->priv);
-            privStream->alive = true;
-        }
-        // Always update usage and max buffers
-        newStream->max_buffers = kMaxBufferCount;
-        switch (newStream->stream_type) {
-            case CAMERA3_STREAM_OUTPUT:
-                newStream->usage = GRALLOC_USAGE_HW_CAMERA_WRITE;
-                break;
-            case CAMERA3_STREAM_INPUT:
-                newStream->usage = GRALLOC_USAGE_HW_CAMERA_READ;
-                break;
-            case CAMERA3_STREAM_BIDIRECTIONAL:
-                newStream->usage = GRALLOC_USAGE_HW_CAMERA_READ |
-                        GRALLOC_USAGE_HW_CAMERA_WRITE;
-                break;
-        }
+  /**
+   * Reap the dead streams
+   */
+  for (StreamIterator s = mStreams.begin(); s != mStreams.end();) {
+    PrivateStreamInfo *privStream =
+        static_cast<PrivateStreamInfo *>((*s)->priv);
+    if (!privStream->alive) {
+      (*s)->priv = NULL;
+      delete privStream;
+      s = mStreams.erase(s);
+    } else {
+      ++s;
     }
+  }
 
-    /**
-     * Reap the dead streams
-     */
-    for (StreamIterator s = mStreams.begin(); s != mStreams.end();) {
-        PrivateStreamInfo *privStream =
-                static_cast<PrivateStreamInfo*>((*s)->priv);
-        if (!privStream->alive) {
-            (*s)->priv = NULL;
-            delete privStream;
-            s = mStreams.erase(s);
-        } else {
-            ++s;
-        }
-    }
+  /**
+   * Can't reuse settings across configureStreams() calls
+   */
+  mPrevSettings.clear();
 
-    /**
-     * Can't reuse settings across configure call
-     */
-    mPrevSettings.clear();
-
-    return OK;
+  return OK;
 }
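configureStreams() tracks per-stream private data with a small mark-and-sweep: mark every known stream dead, re-mark (or newly allocate) whatever the incoming list names, then reap anything still dead. A condensed sketch under simplified stand-in types, where Stream and Info play the roles of camera3_stream_t and PrivateStreamInfo:

#include <list>

struct Stream { void* priv = nullptr; };
struct Info   { bool alive = false; };

static void reconfigure(std::list<Stream*>& known,
                        const std::list<Stream*>& wanted) {
  // 1. Mark: assume every known stream is dead.
  for (Stream* s : known) static_cast<Info*>(s->priv)->alive = false;
  // 2. Re-mark: everything in the new list survives; new streams get info.
  for (Stream* s : wanted) {
    if (s->priv == nullptr) {
      Info* info = new Info();
      info->alive = true;
      s->priv = info;
      known.push_back(s);
    } else {
      static_cast<Info*>(s->priv)->alive = true;
    }
  }
  // 3. Sweep: reap whatever stayed dead.
  for (auto it = known.begin(); it != known.end();) {
    Info* info = static_cast<Info*>((*it)->priv);
    if (!info->alive) {
      (*it)->priv = nullptr;
      delete info;
      it = known.erase(it);
    } else {
      ++it;
    }
  }
}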
 
 status_t EmulatedFakeCamera3::registerStreamBuffers(
-        const camera3_stream_buffer_set *bufferSet) {
-    ALOGV("%s: E", __FUNCTION__);
-    Mutex::Autolock l(mLock);
+    const camera3_stream_buffer_set *bufferSet) {
+  ALOGV("%s: E", __FUNCTION__);
+  Mutex::Autolock l(mLock);
 
-    // Should not be called in HAL versions >= 3.2
+  // Should not be called in HAL versions >= 3.2
 
-    ALOGE("%s: Should not be invoked on new HALs!",
-            __FUNCTION__);
-    return NO_INIT;
+  ALOGE("%s: Should not be invoked on new HALs!", __FUNCTION__);
+  return NO_INIT;
 }
 
-const camera_metadata_t* EmulatedFakeCamera3::constructDefaultRequestSettings(
-        int type) {
-    ALOGV("%s: E", __FUNCTION__);
-    Mutex::Autolock l(mLock);
+const camera_metadata_t *EmulatedFakeCamera3::constructDefaultRequestSettings(
+    int type) {
+  ALOGV("%s: E", __FUNCTION__);
+  Mutex::Autolock l(mLock);
 
-    if (type < 0 || type >= CAMERA3_TEMPLATE_COUNT) {
-        ALOGE("%s: Unknown request settings template: %d",
-                __FUNCTION__, type);
-        return NULL;
-    }
+  if (type < 0 || type >= CAMERA3_TEMPLATE_COUNT) {
+    ALOGE("%s: Unknown request settings template: %d", __FUNCTION__, type);
+    return NULL;
+  }
 
-    if (!hasCapability(BACKWARD_COMPATIBLE) && type != CAMERA3_TEMPLATE_PREVIEW) {
-        ALOGE("%s: Template %d not supported w/o BACKWARD_COMPATIBLE capability",
-                __FUNCTION__, type);
-        return NULL;
-    }
+  if (!hasCapability(BACKWARD_COMPATIBLE) && type != CAMERA3_TEMPLATE_PREVIEW) {
+    ALOGE("%s: Template %d not supported w/o BACKWARD_COMPATIBLE capability",
+          __FUNCTION__, type);
+    return NULL;
+  }
 
-    /**
-     * Cache is not just an optimization - pointer returned has to live at
-     * least as long as the camera device instance does.
-     */
-    if (mDefaultTemplates[type] != NULL) {
-        return mDefaultTemplates[type];
-    }
-
-    CameraMetadata settings;
-
-    /** android.request */
-
-    static const uint8_t metadataMode = ANDROID_REQUEST_METADATA_MODE_FULL;
-    settings.update(ANDROID_REQUEST_METADATA_MODE, &metadataMode, 1);
-
-    static const int32_t id = 0;
-    settings.update(ANDROID_REQUEST_ID, &id, 1);
-
-    static const int32_t frameCount = 0;
-    settings.update(ANDROID_REQUEST_FRAME_COUNT, &frameCount, 1);
-
-    /** android.lens */
-
-    static const float focalLength = 5.0f;
-    settings.update(ANDROID_LENS_FOCAL_LENGTH, &focalLength, 1);
-
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        static const float focusDistance = 0;
-        settings.update(ANDROID_LENS_FOCUS_DISTANCE, &focusDistance, 1);
-
-        static const float aperture = 2.8f;
-        settings.update(ANDROID_LENS_APERTURE, &aperture, 1);
-
-        static const float filterDensity = 0;
-        settings.update(ANDROID_LENS_FILTER_DENSITY, &filterDensity, 1);
-
-        static const uint8_t opticalStabilizationMode =
-                ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
-        settings.update(ANDROID_LENS_OPTICAL_STABILIZATION_MODE,
-                &opticalStabilizationMode, 1);
-
-        // FOCUS_RANGE set only in frame
-    }
-
-    /** android.sensor */
-
-    if (hasCapability(MANUAL_SENSOR)) {
-        static const int64_t exposureTime = 10 * MSEC;
-        settings.update(ANDROID_SENSOR_EXPOSURE_TIME, &exposureTime, 1);
-
-        static const int64_t frameDuration = 33333333L; // 1/30 s
-        settings.update(ANDROID_SENSOR_FRAME_DURATION, &frameDuration, 1);
-
-        static const int32_t sensitivity = 100;
-        settings.update(ANDROID_SENSOR_SENSITIVITY, &sensitivity, 1);
-    }
-
-    // TIMESTAMP set only in frame
-
-    /** android.flash */
-
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        static const uint8_t flashMode = ANDROID_FLASH_MODE_OFF;
-        settings.update(ANDROID_FLASH_MODE, &flashMode, 1);
-
-        static const uint8_t flashPower = 10;
-        settings.update(ANDROID_FLASH_FIRING_POWER, &flashPower, 1);
-
-        static const int64_t firingTime = 0;
-        settings.update(ANDROID_FLASH_FIRING_TIME, &firingTime, 1);
-    }
-
-    /** Processing block modes */
-    if (hasCapability(MANUAL_POST_PROCESSING)) {
-        uint8_t hotPixelMode = 0;
-        uint8_t demosaicMode = 0;
-        uint8_t noiseMode = 0;
-        uint8_t shadingMode = 0;
-        uint8_t colorMode = 0;
-        uint8_t tonemapMode = 0;
-        uint8_t edgeMode = 0;
-        switch (type) {
-            case CAMERA3_TEMPLATE_STILL_CAPTURE:
-                // fall-through
-            case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
-                // fall-through
-            case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
-                hotPixelMode = ANDROID_HOT_PIXEL_MODE_HIGH_QUALITY;
-                demosaicMode = ANDROID_DEMOSAIC_MODE_HIGH_QUALITY;
-                noiseMode = ANDROID_NOISE_REDUCTION_MODE_HIGH_QUALITY;
-                shadingMode = ANDROID_SHADING_MODE_HIGH_QUALITY;
-                colorMode = ANDROID_COLOR_CORRECTION_MODE_HIGH_QUALITY;
-                tonemapMode = ANDROID_TONEMAP_MODE_HIGH_QUALITY;
-                edgeMode = ANDROID_EDGE_MODE_HIGH_QUALITY;
-                break;
-            case CAMERA3_TEMPLATE_PREVIEW:
-                // fall-through
-            case CAMERA3_TEMPLATE_VIDEO_RECORD:
-                // fall-through
-            default:
-                hotPixelMode = ANDROID_HOT_PIXEL_MODE_FAST;
-                demosaicMode = ANDROID_DEMOSAIC_MODE_FAST;
-                noiseMode = ANDROID_NOISE_REDUCTION_MODE_FAST;
-                shadingMode = ANDROID_SHADING_MODE_FAST;
-                colorMode = ANDROID_COLOR_CORRECTION_MODE_FAST;
-                tonemapMode = ANDROID_TONEMAP_MODE_FAST;
-                edgeMode = ANDROID_EDGE_MODE_FAST;
-                break;
-        }
-        settings.update(ANDROID_HOT_PIXEL_MODE, &hotPixelMode, 1);
-        settings.update(ANDROID_DEMOSAIC_MODE, &demosaicMode, 1);
-        settings.update(ANDROID_NOISE_REDUCTION_MODE, &noiseMode, 1);
-        settings.update(ANDROID_SHADING_MODE, &shadingMode, 1);
-        settings.update(ANDROID_COLOR_CORRECTION_MODE, &colorMode, 1);
-        settings.update(ANDROID_TONEMAP_MODE, &tonemapMode, 1);
-        settings.update(ANDROID_EDGE_MODE, &edgeMode, 1);
-    }
-
-    /** android.colorCorrection */
-
-    if (hasCapability(MANUAL_POST_PROCESSING)) {
-        static const camera_metadata_rational colorTransform[9] = {
-            {1,1}, {0,1}, {0,1},
-            {0,1}, {1,1}, {0,1},
-            {0,1}, {0,1}, {1,1}
-        };
-        settings.update(ANDROID_COLOR_CORRECTION_TRANSFORM, colorTransform, 9);
-
-        static const float colorGains[4] = {
-            1.0f, 1.0f, 1.0f, 1.0f
-        };
-        settings.update(ANDROID_COLOR_CORRECTION_GAINS, colorGains, 4);
-    }
-
-    /** android.tonemap */
-
-    if (hasCapability(MANUAL_POST_PROCESSING)) {
-        static const float tonemapCurve[4] = {
-            0.f, 0.f,
-            1.f, 1.f
-        };
-        settings.update(ANDROID_TONEMAP_CURVE_RED, tonemapCurve, 4);
-        settings.update(ANDROID_TONEMAP_CURVE_GREEN, tonemapCurve, 4);
-        settings.update(ANDROID_TONEMAP_CURVE_BLUE, tonemapCurve, 4);
-    }
-
-    /** android.scaler */
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        static const int32_t cropRegion[4] = {
-            0, 0, mSensorWidth, mSensorHeight
-        };
-        settings.update(ANDROID_SCALER_CROP_REGION, cropRegion, 4);
-
-    }
-
-    /** android.jpeg */
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        static const uint8_t jpegQuality = 80;
-        settings.update(ANDROID_JPEG_QUALITY, &jpegQuality, 1);
-
-        static const int32_t thumbnailSize[2] = {
-            640, 480
-        };
-        settings.update(ANDROID_JPEG_THUMBNAIL_SIZE, thumbnailSize, 2);
-
-        static const uint8_t thumbnailQuality = 80;
-        settings.update(ANDROID_JPEG_THUMBNAIL_QUALITY, &thumbnailQuality, 1);
-
-        static const double gpsCoordinates[2] = {
-            0, 0
-        };
-        settings.update(ANDROID_JPEG_GPS_COORDINATES, gpsCoordinates, 2);
-
-        static const uint8_t gpsProcessingMethod[32] = "None";
-        settings.update(ANDROID_JPEG_GPS_PROCESSING_METHOD, gpsProcessingMethod, 32);
-
-        static const int64_t gpsTimestamp = 0;
-        settings.update(ANDROID_JPEG_GPS_TIMESTAMP, &gpsTimestamp, 1);
-
-        static const int32_t jpegOrientation = 0;
-        settings.update(ANDROID_JPEG_ORIENTATION, &jpegOrientation, 1);
-    }
-
-    /** android.stats */
-
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        static const uint8_t faceDetectMode =
-                ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
-        settings.update(ANDROID_STATISTICS_FACE_DETECT_MODE, &faceDetectMode, 1);
-
-        static const uint8_t hotPixelMapMode =
-                ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF;
-        settings.update(ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE, &hotPixelMapMode, 1);
-    }
-
-    // faceRectangles, faceScores, faceLandmarks, faceIds, histogram,
-    // sharpnessMap only in frames
-
-    /** android.control */
-
-    uint8_t controlIntent = 0;
-    switch (type) {
-      case CAMERA3_TEMPLATE_PREVIEW:
-        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
-        break;
-      case CAMERA3_TEMPLATE_STILL_CAPTURE:
-        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE;
-        break;
-      case CAMERA3_TEMPLATE_VIDEO_RECORD:
-        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD;
-        break;
-      case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
-        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT;
-        break;
-      case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
-        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG;
-        break;
-      case CAMERA3_TEMPLATE_MANUAL:
-        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_MANUAL;
-        break;
-      default:
-        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_CUSTOM;
-        break;
-    }
-    settings.update(ANDROID_CONTROL_CAPTURE_INTENT, &controlIntent, 1);
-
-    const uint8_t controlMode = (type == CAMERA3_TEMPLATE_MANUAL) ?
-            ANDROID_CONTROL_MODE_OFF :
-            ANDROID_CONTROL_MODE_AUTO;
-    settings.update(ANDROID_CONTROL_MODE, &controlMode, 1);
-
-    int32_t aeTargetFpsRange[2] = {
-        5, 30
-    };
-    if (type == CAMERA3_TEMPLATE_VIDEO_RECORD || type == CAMERA3_TEMPLATE_VIDEO_SNAPSHOT) {
-        aeTargetFpsRange[0] = 30;
-    }
-    settings.update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, aeTargetFpsRange, 2);
-
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-
-        static const uint8_t effectMode = ANDROID_CONTROL_EFFECT_MODE_OFF;
-        settings.update(ANDROID_CONTROL_EFFECT_MODE, &effectMode, 1);
-
-        static const uint8_t sceneMode = ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY;
-        settings.update(ANDROID_CONTROL_SCENE_MODE, &sceneMode, 1);
-
-        const uint8_t aeMode = (type == CAMERA3_TEMPLATE_MANUAL) ?
-                ANDROID_CONTROL_AE_MODE_OFF :
-                ANDROID_CONTROL_AE_MODE_ON;
-        settings.update(ANDROID_CONTROL_AE_MODE, &aeMode, 1);
-
-        static const uint8_t aeLock = ANDROID_CONTROL_AE_LOCK_OFF;
-        settings.update(ANDROID_CONTROL_AE_LOCK, &aeLock, 1);
-
-        static const int32_t controlRegions[5] = {
-            0, 0, 0, 0, 0
-        };
-        settings.update(ANDROID_CONTROL_AE_REGIONS, controlRegions, 5);
-
-        static const int32_t aeExpCompensation = 0;
-        settings.update(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, &aeExpCompensation, 1);
-
-
-        static const uint8_t aeAntibandingMode =
-                ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO;
-        settings.update(ANDROID_CONTROL_AE_ANTIBANDING_MODE, &aeAntibandingMode, 1);
-
-        static const uint8_t aePrecaptureTrigger = ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE;
-        settings.update(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, &aePrecaptureTrigger, 1);
-
-        const uint8_t awbMode = (type == CAMERA3_TEMPLATE_MANUAL) ?
-                ANDROID_CONTROL_AWB_MODE_OFF :
-                ANDROID_CONTROL_AWB_MODE_AUTO;
-        settings.update(ANDROID_CONTROL_AWB_MODE, &awbMode, 1);
-
-        static const uint8_t awbLock = ANDROID_CONTROL_AWB_LOCK_OFF;
-        settings.update(ANDROID_CONTROL_AWB_LOCK, &awbLock, 1);
-
-        uint8_t afMode = 0;
-
-        if (mFacingBack) {
-            switch (type) {
-                case CAMERA3_TEMPLATE_PREVIEW:
-                    afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
-                    break;
-                case CAMERA3_TEMPLATE_STILL_CAPTURE:
-                    afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
-                    break;
-                case CAMERA3_TEMPLATE_VIDEO_RECORD:
-                    afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
-                    break;
-                case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
-                    afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
-                    break;
-                case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
-                    afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
-                    break;
-                case CAMERA3_TEMPLATE_MANUAL:
-                    afMode = ANDROID_CONTROL_AF_MODE_OFF;
-                    break;
-                default:
-                    afMode = ANDROID_CONTROL_AF_MODE_AUTO;
-                    break;
-            }
-        } else {
-            afMode = ANDROID_CONTROL_AF_MODE_OFF;
-        }
-        settings.update(ANDROID_CONTROL_AF_MODE, &afMode, 1);
-
-        settings.update(ANDROID_CONTROL_AF_REGIONS, controlRegions, 5);
-
-        static const uint8_t afTrigger = ANDROID_CONTROL_AF_TRIGGER_IDLE;
-        settings.update(ANDROID_CONTROL_AF_TRIGGER, &afTrigger, 1);
-
-        static const uint8_t vstabMode =
-                ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF;
-        settings.update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, &vstabMode, 1);
-
-        static const uint8_t blackLevelLock = ANDROID_BLACK_LEVEL_LOCK_OFF;
-        settings.update(ANDROID_BLACK_LEVEL_LOCK, &blackLevelLock, 1);
-
-        static const uint8_t lensShadingMapMode = ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF;
-        settings.update(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE, &lensShadingMapMode, 1);
-
-        static const uint8_t aberrationMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST;
-        settings.update(ANDROID_COLOR_CORRECTION_ABERRATION_MODE, &aberrationMode, 1);
-
-        static const int32_t testPatternMode = ANDROID_SENSOR_TEST_PATTERN_MODE_OFF;
-        settings.update(ANDROID_SENSOR_TEST_PATTERN_MODE, &testPatternMode, 1);
-    }
-
-    mDefaultTemplates[type] = settings.release();
-
+  /**
+   * Cache is not just an optimization - pointer returned has to live at
+   * least as long as the camera device instance does.
+   */
+  if (mDefaultTemplates[type] != NULL) {
     return mDefaultTemplates[type];
+  }
+
+  CameraMetadata settings;
+
+  /** android.request */
+
+  static const uint8_t metadataMode = ANDROID_REQUEST_METADATA_MODE_FULL;
+  settings.update(ANDROID_REQUEST_METADATA_MODE, &metadataMode, 1);
+
+  static const int32_t id = 0;
+  settings.update(ANDROID_REQUEST_ID, &id, 1);
+
+  static const int32_t frameCount = 0;
+  settings.update(ANDROID_REQUEST_FRAME_COUNT, &frameCount, 1);
+
+  /** android.lens */
+
+  static const float focalLength = 5.0f;
+  settings.update(ANDROID_LENS_FOCAL_LENGTH, &focalLength, 1);
+
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    static const float focusDistance = 0;
+    settings.update(ANDROID_LENS_FOCUS_DISTANCE, &focusDistance, 1);
+
+    static const float aperture = 2.8f;
+    settings.update(ANDROID_LENS_APERTURE, &aperture, 1);
+
+    static const float filterDensity = 0;
+    settings.update(ANDROID_LENS_FILTER_DENSITY, &filterDensity, 1);
+
+    static const uint8_t opticalStabilizationMode =
+        ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+    settings.update(ANDROID_LENS_OPTICAL_STABILIZATION_MODE,
+                    &opticalStabilizationMode, 1);
+
+    // FOCUS_RANGE set only in frame
+  }
+
+  /** android.sensor */
+
+  if (hasCapability(MANUAL_SENSOR)) {
+    static const int64_t exposureTime = 10 * MSEC;
+    settings.update(ANDROID_SENSOR_EXPOSURE_TIME, &exposureTime, 1);
+
+    static const int64_t frameDuration = 33333333L;  // 1/30 s
+    settings.update(ANDROID_SENSOR_FRAME_DURATION, &frameDuration, 1);
+
+    static const int32_t sensitivity = 100;
+    settings.update(ANDROID_SENSOR_SENSITIVITY, &sensitivity, 1);
+  }
+
+  // TIMESTAMP set only in frame
+
+  /** android.flash */
+
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    static const uint8_t flashMode = ANDROID_FLASH_MODE_OFF;
+    settings.update(ANDROID_FLASH_MODE, &flashMode, 1);
+
+    static const uint8_t flashPower = 10;
+    settings.update(ANDROID_FLASH_FIRING_POWER, &flashPower, 1);
+
+    static const int64_t firingTime = 0;
+    settings.update(ANDROID_FLASH_FIRING_TIME, &firingTime, 1);
+  }
+
+  /** Processing block modes */
+  if (hasCapability(MANUAL_POST_PROCESSING)) {
+    uint8_t hotPixelMode = 0;
+    uint8_t demosaicMode = 0;
+    uint8_t noiseMode = 0;
+    uint8_t shadingMode = 0;
+    uint8_t colorMode = 0;
+    uint8_t tonemapMode = 0;
+    uint8_t edgeMode = 0;
+    switch (type) {
+      case CAMERA3_TEMPLATE_STILL_CAPTURE:
+        // fall-through
+      case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
+        // fall-through
+      case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
+        hotPixelMode = ANDROID_HOT_PIXEL_MODE_HIGH_QUALITY;
+        demosaicMode = ANDROID_DEMOSAIC_MODE_HIGH_QUALITY;
+        noiseMode = ANDROID_NOISE_REDUCTION_MODE_HIGH_QUALITY;
+        shadingMode = ANDROID_SHADING_MODE_HIGH_QUALITY;
+        colorMode = ANDROID_COLOR_CORRECTION_MODE_HIGH_QUALITY;
+        tonemapMode = ANDROID_TONEMAP_MODE_HIGH_QUALITY;
+        edgeMode = ANDROID_EDGE_MODE_HIGH_QUALITY;
+        break;
+      case CAMERA3_TEMPLATE_PREVIEW:
+        // fall-through
+      case CAMERA3_TEMPLATE_VIDEO_RECORD:
+        // fall-through
+      default:
+        hotPixelMode = ANDROID_HOT_PIXEL_MODE_FAST;
+        demosaicMode = ANDROID_DEMOSAIC_MODE_FAST;
+        noiseMode = ANDROID_NOISE_REDUCTION_MODE_FAST;
+        shadingMode = ANDROID_SHADING_MODE_FAST;
+        colorMode = ANDROID_COLOR_CORRECTION_MODE_FAST;
+        tonemapMode = ANDROID_TONEMAP_MODE_FAST;
+        edgeMode = ANDROID_EDGE_MODE_FAST;
+        break;
+    }
+    settings.update(ANDROID_HOT_PIXEL_MODE, &hotPixelMode, 1);
+    settings.update(ANDROID_DEMOSAIC_MODE, &demosaicMode, 1);
+    settings.update(ANDROID_NOISE_REDUCTION_MODE, &noiseMode, 1);
+    settings.update(ANDROID_SHADING_MODE, &shadingMode, 1);
+    settings.update(ANDROID_COLOR_CORRECTION_MODE, &colorMode, 1);
+    settings.update(ANDROID_TONEMAP_MODE, &tonemapMode, 1);
+    settings.update(ANDROID_EDGE_MODE, &edgeMode, 1);
+  }
+
+  /** android.colorCorrection */
+
+  if (hasCapability(MANUAL_POST_PROCESSING)) {
+    static const camera_metadata_rational colorTransform[9] = {
+        {1, 1}, {0, 1}, {0, 1}, {0, 1}, {1, 1}, {0, 1}, {0, 1}, {0, 1}, {1, 1}};
+    settings.update(ANDROID_COLOR_CORRECTION_TRANSFORM, colorTransform, 9);
+
+    static const float colorGains[4] = {1.0f, 1.0f, 1.0f, 1.0f};
+    settings.update(ANDROID_COLOR_CORRECTION_GAINS, colorGains, 4);
+  }
+
+  /** android.tonemap */
+
+  if (hasCapability(MANUAL_POST_PROCESSING)) {
+    static const float tonemapCurve[4] = {0.f, 0.f, 1.f, 1.f};
+    settings.update(ANDROID_TONEMAP_CURVE_RED, tonemapCurve, 4);
+    settings.update(ANDROID_TONEMAP_CURVE_GREEN, tonemapCurve, 4);
+    settings.update(ANDROID_TONEMAP_CURVE_BLUE, tonemapCurve, 4);
+  }
+
+  /** android.scaler */
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    static const int32_t cropRegion[4] = {0, 0, mSensorWidth, mSensorHeight};
+    settings.update(ANDROID_SCALER_CROP_REGION, cropRegion, 4);
+  }
+
+  /** android.jpeg */
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    static const uint8_t jpegQuality = 80;
+    settings.update(ANDROID_JPEG_QUALITY, &jpegQuality, 1);
+
+    static const int32_t thumbnailSize[2] = {640, 480};
+    settings.update(ANDROID_JPEG_THUMBNAIL_SIZE, thumbnailSize, 2);
+
+    static const uint8_t thumbnailQuality = 80;
+    settings.update(ANDROID_JPEG_THUMBNAIL_QUALITY, &thumbnailQuality, 1);
+
+    static const double gpsCoordinates[2] = {0, 0};
+    settings.update(ANDROID_JPEG_GPS_COORDINATES, gpsCoordinates, 2);
+
+    static const uint8_t gpsProcessingMethod[32] = "None";
+    settings.update(ANDROID_JPEG_GPS_PROCESSING_METHOD, gpsProcessingMethod,
+                    32);
+
+    static const int64_t gpsTimestamp = 0;
+    settings.update(ANDROID_JPEG_GPS_TIMESTAMP, &gpsTimestamp, 1);
+
+    static const int32_t jpegOrientation = 0;
+    settings.update(ANDROID_JPEG_ORIENTATION, &jpegOrientation, 1);
+  }
+
+  /** android.stats */
+
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    static const uint8_t faceDetectMode =
+        ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
+    settings.update(ANDROID_STATISTICS_FACE_DETECT_MODE, &faceDetectMode, 1);
+
+    static const uint8_t hotPixelMapMode =
+        ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF;
+    settings.update(ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE, &hotPixelMapMode, 1);
+  }
+
+  // faceRectangles, faceScores, faceLandmarks, faceIds, histogram,
+  // sharpnessMap only in frames
+
+  /** android.control */
+
+  uint8_t controlIntent = 0;
+  switch (type) {
+    case CAMERA3_TEMPLATE_PREVIEW:
+      controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
+      break;
+    case CAMERA3_TEMPLATE_STILL_CAPTURE:
+      controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE;
+      break;
+    case CAMERA3_TEMPLATE_VIDEO_RECORD:
+      controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD;
+      break;
+    case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
+      controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT;
+      break;
+    case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
+      controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG;
+      break;
+    case CAMERA3_TEMPLATE_MANUAL:
+      controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_MANUAL;
+      break;
+    default:
+      controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_CUSTOM;
+      break;
+  }
+  settings.update(ANDROID_CONTROL_CAPTURE_INTENT, &controlIntent, 1);
+
+  const uint8_t controlMode = (type == CAMERA3_TEMPLATE_MANUAL)
+                                  ? ANDROID_CONTROL_MODE_OFF
+                                  : ANDROID_CONTROL_MODE_AUTO;
+  settings.update(ANDROID_CONTROL_MODE, &controlMode, 1);
+
+  int32_t aeTargetFpsRange[2] = {5, 30};
+  if (type == CAMERA3_TEMPLATE_VIDEO_RECORD ||
+      type == CAMERA3_TEMPLATE_VIDEO_SNAPSHOT) {
+    aeTargetFpsRange[0] = 30;
+  }
+  settings.update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, aeTargetFpsRange, 2);
+
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    static const uint8_t effectMode = ANDROID_CONTROL_EFFECT_MODE_OFF;
+    settings.update(ANDROID_CONTROL_EFFECT_MODE, &effectMode, 1);
+
+    static const uint8_t sceneMode = ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY;
+    settings.update(ANDROID_CONTROL_SCENE_MODE, &sceneMode, 1);
+
+    const uint8_t aeMode = (type == CAMERA3_TEMPLATE_MANUAL)
+                               ? ANDROID_CONTROL_AE_MODE_OFF
+                               : ANDROID_CONTROL_AE_MODE_ON;
+    settings.update(ANDROID_CONTROL_AE_MODE, &aeMode, 1);
+
+    static const uint8_t aeLock = ANDROID_CONTROL_AE_LOCK_OFF;
+    settings.update(ANDROID_CONTROL_AE_LOCK, &aeLock, 1);
+
+    static const int32_t controlRegions[5] = {0, 0, 0, 0, 0};
+    settings.update(ANDROID_CONTROL_AE_REGIONS, controlRegions, 5);
+
+    static const int32_t aeExpCompensation = 0;
+    settings.update(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
+                    &aeExpCompensation, 1);
+
+    static const uint8_t aeAntibandingMode =
+        ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO;
+    settings.update(ANDROID_CONTROL_AE_ANTIBANDING_MODE, &aeAntibandingMode, 1);
+
+    static const uint8_t aePrecaptureTrigger =
+        ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE;
+    settings.update(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, &aePrecaptureTrigger,
+                    1);
+
+    const uint8_t awbMode = (type == CAMERA3_TEMPLATE_MANUAL)
+                                ? ANDROID_CONTROL_AWB_MODE_OFF
+                                : ANDROID_CONTROL_AWB_MODE_AUTO;
+    settings.update(ANDROID_CONTROL_AWB_MODE, &awbMode, 1);
+
+    static const uint8_t awbLock = ANDROID_CONTROL_AWB_LOCK_OFF;
+    settings.update(ANDROID_CONTROL_AWB_LOCK, &awbLock, 1);
+
+    uint8_t afMode = 0;
+
+    if (mFacingBack) {
+      switch (type) {
+        case CAMERA3_TEMPLATE_PREVIEW:
+          afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
+          break;
+        case CAMERA3_TEMPLATE_STILL_CAPTURE:
+          afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
+          break;
+        case CAMERA3_TEMPLATE_VIDEO_RECORD:
+          afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
+          break;
+        case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
+          afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
+          break;
+        case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
+          afMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
+          break;
+        case CAMERA3_TEMPLATE_MANUAL:
+          afMode = ANDROID_CONTROL_AF_MODE_OFF;
+          break;
+        default:
+          afMode = ANDROID_CONTROL_AF_MODE_AUTO;
+          break;
+      }
+    } else {
+      afMode = ANDROID_CONTROL_AF_MODE_OFF;
+    }
+    settings.update(ANDROID_CONTROL_AF_MODE, &afMode, 1);
+
+    settings.update(ANDROID_CONTROL_AF_REGIONS, controlRegions, 5);
+
+    static const uint8_t afTrigger = ANDROID_CONTROL_AF_TRIGGER_IDLE;
+    settings.update(ANDROID_CONTROL_AF_TRIGGER, &afTrigger, 1);
+
+    static const uint8_t vstabMode =
+        ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF;
+    settings.update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, &vstabMode, 1);
+
+    static const uint8_t blackLevelLock = ANDROID_BLACK_LEVEL_LOCK_OFF;
+    settings.update(ANDROID_BLACK_LEVEL_LOCK, &blackLevelLock, 1);
+
+    static const uint8_t lensShadingMapMode =
+        ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF;
+    settings.update(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE,
+                    &lensShadingMapMode, 1);
+
+    static const uint8_t aberrationMode =
+        ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST;
+    settings.update(ANDROID_COLOR_CORRECTION_ABERRATION_MODE, &aberrationMode,
+                    1);
+
+    static const int32_t testPatternMode = ANDROID_SENSOR_TEST_PATTERN_MODE_OFF;
+    settings.update(ANDROID_SENSOR_TEST_PATTERN_MODE, &testPatternMode, 1);
+  }
+
+  mDefaultTemplates[type] = settings.release();
+
+  return mDefaultTemplates[type];
 }
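As the comment in the function stresses, the template cache is a lifetime requirement rather than an optimization: the framework may hold the returned pointer for as long as the device stays open. A stripped-down sketch of that build-once contract; getTemplate is a hypothetical free function, the types come from the camera headers this file already uses, and the real method additionally holds mLock:

const camera_metadata_t* getTemplate(int type) {
  static const camera_metadata_t* cache[CAMERA3_TEMPLATE_COUNT] = {};
  if (type < 0 || type >= CAMERA3_TEMPLATE_COUNT) return NULL;
  if (cache[type] == NULL) {
    CameraMetadata settings;
    // ... populate settings exactly as above ...
    cache[type] = settings.release();  // ownership parks in the cache
  }
  return cache[type];  // stays valid for the device's lifetime
}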
 
 status_t EmulatedFakeCamera3::processCaptureRequest(
-        camera3_capture_request *request) {
+    camera3_capture_request *request) {
+  Mutex::Autolock l(mLock);
+  status_t res;
 
-    Mutex::Autolock l(mLock);
-    status_t res;
+  /** Validation */
 
-    /** Validation */
+  if (mStatus < STATUS_READY) {
+    ALOGE("%s: Can't submit capture requests in state %d", __FUNCTION__,
+          mStatus);
+    return INVALID_OPERATION;
+  }
 
-    if (mStatus < STATUS_READY) {
-        ALOGE("%s: Can't submit capture requests in state %d", __FUNCTION__,
-                mStatus);
-        return INVALID_OPERATION;
+  if (request == NULL) {
+    ALOGE("%s: NULL request!", __FUNCTION__);
+    return BAD_VALUE;
+  }
+
+  uint32_t frameNumber = request->frame_number;
+
+  if (request->settings == NULL && mPrevSettings.isEmpty()) {
+    ALOGE(
+        "%s: Request %d: NULL settings for first request after"
+        "configureStreams()",
+        __FUNCTION__, frameNumber);
+    return BAD_VALUE;
+  }
+
+  if (request->input_buffer != NULL &&
+      request->input_buffer->stream != mInputStream) {
+    ALOGE("%s: Request %d: Input buffer not from input stream!", __FUNCTION__,
+          frameNumber);
+    ALOGV("%s: Bad stream %p, expected: %p", __FUNCTION__,
+          request->input_buffer->stream, mInputStream);
+    ALOGV("%s: Bad stream type %d, expected stream type %d", __FUNCTION__,
+          request->input_buffer->stream->stream_type,
+          mInputStream ? mInputStream->stream_type : -1);
+
+    return BAD_VALUE;
+  }
+
+  if (request->num_output_buffers < 1 || request->output_buffers == NULL) {
+    ALOGE("%s: Request %d: No output buffers provided!", __FUNCTION__,
+          frameNumber);
+    return BAD_VALUE;
+  }
+
+  // Validate all buffers, starting with input buffer if it's given
+
+  ssize_t idx;
+  const camera3_stream_buffer_t *b;
+  if (request->input_buffer != NULL) {
+    idx = -1;
+    b = request->input_buffer;
+  } else {
+    idx = 0;
+    b = request->output_buffers;
+  }
+  do {
+    PrivateStreamInfo *priv = static_cast<PrivateStreamInfo *>(b->stream->priv);
+    if (priv == NULL) {
+      ALOGE("%s: Request %d: Buffer %zu: Unconfigured stream!", __FUNCTION__,
+            frameNumber, idx);
+      return BAD_VALUE;
+    }
+    if (!priv->alive) {
+      ALOGE("%s: Request %d: Buffer %zu: Dead stream!", __FUNCTION__,
+            frameNumber, idx);
+      return BAD_VALUE;
+    }
+    if (b->status != CAMERA3_BUFFER_STATUS_OK) {
+      ALOGE("%s: Request %d: Buffer %zu: Status not OK!", __FUNCTION__,
+            frameNumber, idx);
+      return BAD_VALUE;
+    }
+    if (b->release_fence != -1) {
+      ALOGE("%s: Request %d: Buffer %zu: Has a release fence!", __FUNCTION__,
+            frameNumber, idx);
+      return BAD_VALUE;
+    }
+    if (b->buffer == NULL) {
+      ALOGE("%s: Request %d: Buffer %zu: NULL buffer handle!", __FUNCTION__,
+            frameNumber, idx);
+      return BAD_VALUE;
+    }
+    idx++;
+    b = &(request->output_buffers[idx]);
+  } while (idx < (ssize_t)request->num_output_buffers);
+
+  // TODO: Validate settings parameters
+
+  /**
+   * Start processing this request
+   */
+
+  mStatus = STATUS_ACTIVE;
+
+  CameraMetadata settings;
+
+  if (request->settings == NULL) {
+    settings.acquire(mPrevSettings);
+  } else {
+    settings = request->settings;
+  }
+
+  res = process3A(settings);
+  if (res != OK) {
+    return res;
+  }
+
+  // TODO: Handle reprocessing
+
+  /**
+   * Get ready for sensor config
+   */
+
+  nsecs_t exposureTime;
+  nsecs_t frameDuration;
+  uint32_t sensitivity;
+  bool needJpeg = false;
+  camera_metadata_entry_t entry;
+
+  entry = settings.find(ANDROID_SENSOR_EXPOSURE_TIME);
+  exposureTime =
+      (entry.count > 0) ? entry.data.i64[0] : Sensor::kExposureTimeRange[0];
+  entry = settings.find(ANDROID_SENSOR_FRAME_DURATION);
+  frameDuration =
+      (entry.count > 0) ? entry.data.i64[0] : Sensor::kFrameDurationRange[0];
+  entry = settings.find(ANDROID_SENSOR_SENSITIVITY);
+  sensitivity =
+      (entry.count > 0) ? entry.data.i32[0] : Sensor::kSensitivityRange[0];
+
+  if (exposureTime > frameDuration) {
+    frameDuration = exposureTime + Sensor::kMinVerticalBlank;
+    settings.update(ANDROID_SENSOR_FRAME_DURATION, &frameDuration, 1);
+  }
+
+  Buffers *sensorBuffers = new Buffers();
+  HalBufferVector *buffers = new HalBufferVector();
+
+  sensorBuffers->setCapacity(request->num_output_buffers);
+  buffers->setCapacity(request->num_output_buffers);
+
+  // Process all the buffers we got for output, constructing internal buffer
+  // structures for them, and lock them for writing.
+  for (size_t i = 0; i < request->num_output_buffers; i++) {
+    const camera3_stream_buffer &srcBuf = request->output_buffers[i];
+    StreamBuffer destBuf;
+    destBuf.streamId = kGenericStreamId;
+    destBuf.width = srcBuf.stream->width;
+    destBuf.height = srcBuf.stream->height;
+    // For GCE, IMPLEMENTATION_DEFINED is always RGBx_8888
+    destBuf.format =
+        (srcBuf.stream->format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED)
+            ? HAL_PIXEL_FORMAT_RGBA_8888
+            : srcBuf.stream->format;
+    destBuf.stride = srcBuf.stream->width;
+    destBuf.dataSpace = srcBuf.stream->data_space;
+    destBuf.buffer = srcBuf.buffer;
+
+    if (destBuf.format == HAL_PIXEL_FORMAT_BLOB) {
+      needJpeg = true;
     }
 
-    if (request == NULL) {
-        ALOGE("%s: NULL request!", __FUNCTION__);
-        return BAD_VALUE;
+    // Wait on fence
+    sp<Fence> bufferAcquireFence = new Fence(srcBuf.acquire_fence);
+    res = bufferAcquireFence->wait(kFenceTimeoutMs);
+    if (res == TIMED_OUT) {
+      ALOGE("%s: Request %d: Buffer %zu: Fence timed out after %d ms",
+            __FUNCTION__, frameNumber, i, kFenceTimeoutMs);
     }
-
-    uint32_t frameNumber = request->frame_number;
-
-    if (request->settings == NULL && mPrevSettings.isEmpty()) {
-        ALOGE("%s: Request %d: NULL settings for first request after"
-                "configureStreams()", __FUNCTION__, frameNumber);
-        return BAD_VALUE;
-    }
-
-    if (request->input_buffer != NULL &&
-            request->input_buffer->stream != mInputStream) {
-        ALOGE("%s: Request %d: Input buffer not from input stream!",
-                __FUNCTION__, frameNumber);
-        ALOGV("%s: Bad stream %p, expected: %p",
-              __FUNCTION__, request->input_buffer->stream,
-              mInputStream);
-        ALOGV("%s: Bad stream type %d, expected stream type %d",
-              __FUNCTION__, request->input_buffer->stream->stream_type,
-              mInputStream ? mInputStream->stream_type : -1);
-
-        return BAD_VALUE;
-    }
-
-    if (request->num_output_buffers < 1 || request->output_buffers == NULL) {
-        ALOGE("%s: Request %d: No output buffers provided!",
-                __FUNCTION__, frameNumber);
-        return BAD_VALUE;
-    }
-
-    // Validate all buffers, starting with input buffer if it's given
-
-    ssize_t idx;
-    const camera3_stream_buffer_t *b;
-    if (request->input_buffer != NULL) {
-        idx = -1;
-        b = request->input_buffer;
-    } else {
-        idx = 0;
-        b = request->output_buffers;
-    }
-    do {
-        PrivateStreamInfo *priv =
-                static_cast<PrivateStreamInfo*>(b->stream->priv);
-        if (priv == NULL) {
-            ALOGE("%s: Request %d: Buffer %zu: Unconfigured stream!",
-                    __FUNCTION__, frameNumber, idx);
-            return BAD_VALUE;
+    if (res == OK) {
+      // Lock buffer for writing
+      if (srcBuf.stream->format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
+        if (destBuf.format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
+          android_ycbcr ycbcr = android_ycbcr();
+          res = GrallocModule::getInstance().lock_ycbcr(
+              *(destBuf.buffer), GRALLOC_USAGE_HW_CAMERA_WRITE, 0, 0,
+              destBuf.width, destBuf.height, &ycbcr);
+          // This is only valid because we know that emulator's
+          // YCbCr_420_888 is really contiguous NV21 under the hood
+          destBuf.img = static_cast<uint8_t *>(ycbcr.y);
+        } else {
+          ALOGE("Unexpected private format for flexible YUV: 0x%x",
+                destBuf.format);
+          res = INVALID_OPERATION;
         }
-        if (!priv->alive) {
-            ALOGE("%s: Request %d: Buffer %zu: Dead stream!",
-                    __FUNCTION__, frameNumber, idx);
-            return BAD_VALUE;
-        }
-        if (b->status != CAMERA3_BUFFER_STATUS_OK) {
-            ALOGE("%s: Request %d: Buffer %zu: Status not OK!",
-                    __FUNCTION__, frameNumber, idx);
-            return BAD_VALUE;
-        }
-        if (b->release_fence != -1) {
-            ALOGE("%s: Request %d: Buffer %zu: Has a release fence!",
-                    __FUNCTION__, frameNumber, idx);
-            return BAD_VALUE;
-        }
-        if (b->buffer == NULL) {
-            ALOGE("%s: Request %d: Buffer %zu: NULL buffer handle!",
-                    __FUNCTION__, frameNumber, idx);
-            return BAD_VALUE;
-        }
-        idx++;
-        b = &(request->output_buffers[idx]);
-    } while (idx < (ssize_t)request->num_output_buffers);
-
-    // TODO: Validate settings parameters
-
-    /**
-     * Start processing this request
-     */
-
-    mStatus = STATUS_ACTIVE;
-
-    CameraMetadata settings;
-
-    if (request->settings == NULL) {
-        settings.acquire(mPrevSettings);
-    } else {
-        settings = request->settings;
+      } else {
+        res = GrallocModule::getInstance().lock(
+            *(destBuf.buffer), GRALLOC_USAGE_HW_CAMERA_WRITE, 0, 0,
+            destBuf.width, destBuf.height, (void **)&(destBuf.img));
+      }
+      if (res != OK) {
+        ALOGE("%s: Request %d: Buffer %zu: Unable to lock buffer", __FUNCTION__,
+              frameNumber, i);
+      }
     }
 
-    res = process3A(settings);
     if (res != OK) {
-        return res;
+      // Either waiting or locking failed. Unlock locked buffers and bail
+      // out.
+      for (size_t j = 0; j < i; j++) {
+        GrallocModule::getInstance().unlock(
+            *(request->output_buffers[j].buffer));
+      }
+      delete sensorBuffers;
+      delete buffers;
+      return NO_INIT;
     }
 
-    // TODO: Handle reprocessing
+    sensorBuffers->push_back(destBuf);
+    buffers->push_back(srcBuf);
+  }
 
-    /**
-     * Get ready for sensor config
-     */
-
-    nsecs_t  exposureTime;
-    nsecs_t  frameDuration;
-    uint32_t sensitivity;
-    bool     needJpeg = false;
-    camera_metadata_entry_t entry;
-
-    entry = settings.find(ANDROID_SENSOR_EXPOSURE_TIME);
-    exposureTime = (entry.count > 0) ? entry.data.i64[0] : Sensor::kExposureTimeRange[0];
-    entry = settings.find(ANDROID_SENSOR_FRAME_DURATION);
-    frameDuration = (entry.count > 0)? entry.data.i64[0] : Sensor::kFrameDurationRange[0];
-    entry = settings.find(ANDROID_SENSOR_SENSITIVITY);
-    sensitivity = (entry.count > 0) ? entry.data.i32[0] : Sensor::kSensitivityRange[0];
-
-    if (exposureTime > frameDuration) {
-        frameDuration = exposureTime + Sensor::kMinVerticalBlank;
-        settings.update(ANDROID_SENSOR_FRAME_DURATION, &frameDuration, 1);
+  /**
+   * Wait for the JPEG compressor to be idle, if needed
+   */
+  if (needJpeg) {
+    bool ready = mJpegCompressor->waitForDone(kJpegTimeoutNs);
+    if (!ready) {
+      ALOGE("%s: Timeout waiting for JPEG compression to complete!",
+            __FUNCTION__);
+      return NO_INIT;
     }
-
-    Buffers *sensorBuffers = new Buffers();
-    HalBufferVector *buffers = new HalBufferVector();
-
-    sensorBuffers->setCapacity(request->num_output_buffers);
-    buffers->setCapacity(request->num_output_buffers);
-
-    // Process all the buffers we got for output, constructing internal buffer
-    // structures for them, and lock them for writing.
-    for (size_t i = 0; i < request->num_output_buffers; i++) {
-        const camera3_stream_buffer &srcBuf = request->output_buffers[i];
-        StreamBuffer destBuf;
-        destBuf.streamId = kGenericStreamId;
-        destBuf.width    = srcBuf.stream->width;
-        destBuf.height   = srcBuf.stream->height;
-        // For GCE, IMPLEMENTATION_DEFINED is always RGBx_8888
-        destBuf.format = (srcBuf.stream->format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) ?
-                HAL_PIXEL_FORMAT_RGBA_8888 :
-                srcBuf.stream->format;
-        destBuf.stride   = srcBuf.stream->width;
-        destBuf.dataSpace = srcBuf.stream->data_space;
-        destBuf.buffer   = srcBuf.buffer;
-
-        if (destBuf.format == HAL_PIXEL_FORMAT_BLOB) {
-            needJpeg = true;
-        }
-
-        // Wait on fence
-        sp<Fence> bufferAcquireFence = new Fence(srcBuf.acquire_fence);
-        res = bufferAcquireFence->wait(kFenceTimeoutMs);
-        if (res == TIMED_OUT) {
-            ALOGE("%s: Request %d: Buffer %zu: Fence timed out after %d ms",
-                    __FUNCTION__, frameNumber, i, kFenceTimeoutMs);
-        }
-        if (res == OK) {
-            // Lock buffer for writing
-            if (srcBuf.stream->format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
-                if (destBuf.format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
-                    android_ycbcr ycbcr = android_ycbcr();
-                    res = GrallocModule::getInstance().lock_ycbcr(
-                        *(destBuf.buffer),
-                        GRALLOC_USAGE_HW_CAMERA_WRITE,
-                        0, 0, destBuf.width, destBuf.height,
-                        &ycbcr);
-                    // This is only valid because we know that emulator's
-                    // YCbCr_420_888 is really contiguous NV21 under the hood
-                    destBuf.img = static_cast<uint8_t*>(ycbcr.y);
-                } else {
-                    ALOGE("Unexpected private format for flexible YUV: 0x%x",
-                            destBuf.format);
-                    res = INVALID_OPERATION;
-                }
-            } else {
-                res = GrallocModule::getInstance().lock(
-                    *(destBuf.buffer),
-                    GRALLOC_USAGE_HW_CAMERA_WRITE,
-                    0, 0, destBuf.width, destBuf.height,
-                    (void**)&(destBuf.img));
-
-            }
-            if (res != OK) {
-                ALOGE("%s: Request %d: Buffer %zu: Unable to lock buffer",
-                        __FUNCTION__, frameNumber, i);
-            }
-        }
-
-        if (res != OK) {
-            // Either waiting or locking failed. Unlock locked buffers and bail
-            // out.
-            for (size_t j = 0; j < i; j++) {
-                GrallocModule::getInstance().unlock(
-                        *(request->output_buffers[i].buffer));
-            }
-            delete sensorBuffers;
-            delete buffers;
-            return NO_INIT;
-        }
-
-        sensorBuffers->push_back(destBuf);
-        buffers->push_back(srcBuf);
-    }
-
-    /**
-     * Wait for JPEG compressor to not be busy, if needed
-     */
-    if (needJpeg) {
-        bool ready = mJpegCompressor->waitForDone(kJpegTimeoutNs);
-        if (!ready) {
-            ALOGE("%s: Timeout waiting for JPEG compression to complete!",
-                    __FUNCTION__);
-            return NO_INIT;
-        }
-        res = mJpegCompressor->reserve();
-        if (res != OK) {
-            ALOGE("%s: Error managing JPEG compressor resources, can't reserve it!", __FUNCTION__);
-            return NO_INIT;
-        }
-    }
-
-    /**
-     * Wait until the in-flight queue has room
-     */
-    res = mReadoutThread->waitForReadout();
+    res = mJpegCompressor->reserve();
     if (res != OK) {
-        ALOGE("%s: Timeout waiting for previous requests to complete!",
-                __FUNCTION__);
-        return NO_INIT;
+      ALOGE("%s: Error managing JPEG compressor resources, can't reserve it!",
+            __FUNCTION__);
+      return NO_INIT;
     }
+  }
 
-    /**
-     * Wait until sensor's ready. This waits for lengthy amounts of time with
-     * mLock held, but the interface spec is that no other calls may by done to
-     * the HAL by the framework while process_capture_request is happening.
-     */
-    int syncTimeoutCount = 0;
-    while(!mSensor->waitForVSync(kSyncWaitTimeout)) {
-        if (mStatus == STATUS_ERROR) {
-            return NO_INIT;
-        }
-        if (syncTimeoutCount == kMaxSyncTimeoutCount) {
-            ALOGE("%s: Request %d: Sensor sync timed out after %" PRId64 " ms",
-                    __FUNCTION__, frameNumber,
-                    kSyncWaitTimeout * kMaxSyncTimeoutCount / 1000000);
-            return NO_INIT;
-        }
-        syncTimeoutCount++;
+  /**
+   * Wait until the in-flight queue has room
+   */
+  res = mReadoutThread->waitForReadout();
+  if (res != OK) {
+    ALOGE("%s: Timeout waiting for previous requests to complete!",
+          __FUNCTION__);
+    return NO_INIT;
+  }
+
+  /**
+   * Wait until sensor's ready. This waits for lengthy amounts of time with
+   * mLock held, but the interface spec is that no other calls may be made to
+   * the HAL by the framework while process_capture_request is happening.
+   */
+  int syncTimeoutCount = 0;
+  while (!mSensor->waitForVSync(kSyncWaitTimeout)) {
+    if (mStatus == STATUS_ERROR) {
+      return NO_INIT;
     }
+    if (syncTimeoutCount == kMaxSyncTimeoutCount) {
+      ALOGE("%s: Request %d: Sensor sync timed out after %" PRId64 " ms",
+            __FUNCTION__, frameNumber,
+            kSyncWaitTimeout * kMaxSyncTimeoutCount / 1000000);
+      return NO_INIT;
+    }
+    syncTimeoutCount++;
+  }
 
-    /**
-     * Configure sensor and queue up the request to the readout thread
-     */
-    mSensor->setExposureTime(exposureTime);
-    mSensor->setFrameDuration(frameDuration);
-    mSensor->setSensitivity(sensitivity);
-    mSensor->setDestinationBuffers(sensorBuffers);
-    mSensor->setFrameNumber(request->frame_number);
+  /**
+   * Configure sensor and queue up the request to the readout thread
+   */
+  mSensor->setExposureTime(exposureTime);
+  mSensor->setFrameDuration(frameDuration);
+  mSensor->setSensitivity(sensitivity);
+  mSensor->setDestinationBuffers(sensorBuffers);
+  mSensor->setFrameNumber(request->frame_number);
 
-    ReadoutThread::Request r;
-    r.frameNumber = request->frame_number;
-    r.settings = settings;
-    r.sensorBuffers = sensorBuffers;
-    r.buffers = buffers;
+  ReadoutThread::Request r;
+  r.frameNumber = request->frame_number;
+  r.settings = settings;
+  r.sensorBuffers = sensorBuffers;
+  r.buffers = buffers;
 
-    mReadoutThread->queueCaptureRequest(r);
-    ALOGVV("%s: Queued frame %d", __FUNCTION__, request->frame_number);
+  mReadoutThread->queueCaptureRequest(r);
+  ALOGVV("%s: Queued frame %d", __FUNCTION__, request->frame_number);
 
-    // Cache the settings for next time
-    mPrevSettings.acquire(settings);
+  // Cache the settings for next time
+  mPrevSettings.acquire(settings);
 
-    return OK;
+  return OK;
 }
 
 status_t EmulatedFakeCamera3::flush() {
-    ALOGW("%s: Not implemented; ignored", __FUNCTION__);
-    return OK;
+  ALOGW("%s: Not implemented; ignored", __FUNCTION__);
+  return OK;
 }
 
 /** Debug methods */
 
-void EmulatedFakeCamera3::dump(int fd) {
-
-}
+void EmulatedFakeCamera3::dump(int fd) {}
 
 /**
  * Private methods
  */
 
 status_t EmulatedFakeCamera3::getCameraCapabilities() {
+  const char *key =
+      mFacingBack ? "qemu.sf.back_camera_caps" : "qemu.sf.front_camera_caps";
 
-    const char *key = mFacingBack ? "qemu.sf.back_camera_caps" : "qemu.sf.front_camera_caps";
-
-    /* Defined by 'qemu.sf.*_camera_caps' boot property: if the
-     * property doesn't exist, it is assumed to list FULL. */
-    char prop[PROPERTY_VALUE_MAX];
-    if (property_get(key, prop, NULL) > 0) {
-        char *saveptr = nullptr;
-        char *cap = strtok_r(prop, " ,", &saveptr);
-        while (cap != NULL) {
-            for (int i = 0; i < NUM_CAPABILITIES; i++) {
-                if (!strcasecmp(cap, sAvailableCapabilitiesStrings[i])) {
-                    mCapabilities.add(static_cast<AvailableCapabilities>(i));
-                    break;
-                }
-            }
-            cap = strtok_r(NULL, " ,", &saveptr);
+  /* Defined by the 'qemu.sf.*_camera_caps' boot property: if the
+   * property doesn't exist, FULL_LEVEL plus RAW is assumed. */
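+  /* The value is a comma- or space-separated list of capability names,
+   * matched case-insensitively against sAvailableCapabilitiesStrings
+   * (e.g. "FULL_LEVEL RAW"). */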
+  char prop[PROPERTY_VALUE_MAX];
+  if (property_get(key, prop, NULL) > 0) {
+    char *saveptr = nullptr;
+    char *cap = strtok_r(prop, " ,", &saveptr);
+    while (cap != NULL) {
+      for (int i = 0; i < NUM_CAPABILITIES; i++) {
+        if (!strcasecmp(cap, sAvailableCapabilitiesStrings[i])) {
+          mCapabilities.add(static_cast<AvailableCapabilities>(i));
+          break;
         }
-        if (mCapabilities.size() == 0) {
-            ALOGE("qemu.sf.back_camera_caps had no valid capabilities: %s", prop);
-        }
+      }
+      cap = strtok_r(NULL, " ,", &saveptr);
     }
-    // Default to FULL_LEVEL plus RAW if nothing is defined
     if (mCapabilities.size() == 0) {
-        mCapabilities.add(FULL_LEVEL);
-        mCapabilities.add(RAW);
+      ALOGE("qemu.sf.back_camera_caps had no valid capabilities: %s", prop);
     }
+  }
+  // Default to FULL_LEVEL plus RAW if nothing is defined
+  if (mCapabilities.size() == 0) {
+    mCapabilities.add(FULL_LEVEL);
+    mCapabilities.add(RAW);
+  }
 
-    // Add level-based caps
-    if (hasCapability(FULL_LEVEL)) {
-        mCapabilities.add(BURST_CAPTURE);
-        mCapabilities.add(READ_SENSOR_SETTINGS);
-        mCapabilities.add(MANUAL_SENSOR);
-        mCapabilities.add(MANUAL_POST_PROCESSING);
-    };
+  // Add level-based caps
+  if (hasCapability(FULL_LEVEL)) {
+    mCapabilities.add(BURST_CAPTURE);
+    mCapabilities.add(READ_SENSOR_SETTINGS);
+    mCapabilities.add(MANUAL_SENSOR);
+    mCapabilities.add(MANUAL_POST_PROCESSING);
+  }
 
-    // Backwards-compatible is required for most other caps
-    // Not required for DEPTH_OUTPUT, though.
-    if (hasCapability(BURST_CAPTURE) ||
-            hasCapability(READ_SENSOR_SETTINGS) ||
-            hasCapability(RAW) ||
-            hasCapability(MANUAL_SENSOR) ||
-            hasCapability(MANUAL_POST_PROCESSING) ||
-            hasCapability(PRIVATE_REPROCESSING) ||
-            hasCapability(YUV_REPROCESSING) ||
-            hasCapability(CONSTRAINED_HIGH_SPEED_VIDEO)) {
-        mCapabilities.add(BACKWARD_COMPATIBLE);
-    }
+  // Backwards-compatible is required for most other caps
+  // Not required for DEPTH_OUTPUT, though.
+  if (hasCapability(BURST_CAPTURE) || hasCapability(READ_SENSOR_SETTINGS) ||
+      hasCapability(RAW) || hasCapability(MANUAL_SENSOR) ||
+      hasCapability(MANUAL_POST_PROCESSING) ||
+      hasCapability(PRIVATE_REPROCESSING) || hasCapability(YUV_REPROCESSING) ||
+      hasCapability(CONSTRAINED_HIGH_SPEED_VIDEO)) {
+    mCapabilities.add(BACKWARD_COMPATIBLE);
+  }
 
-    ALOGI("Camera %d capabilities:", mCameraID);
-    for (size_t i = 0; i < mCapabilities.size(); i++) {
-        ALOGI("  %s", sAvailableCapabilitiesStrings[mCapabilities[i]]);
-    }
+  ALOGI("Camera %d capabilities:", mCameraID);
+  for (size_t i = 0; i < mCapabilities.size(); i++) {
+    ALOGI("  %s", sAvailableCapabilitiesStrings[mCapabilities[i]]);
+  }
 
-    return OK;
+  return OK;
 }
 
 bool EmulatedFakeCamera3::hasCapability(AvailableCapabilities cap) {
-    ssize_t idx = mCapabilities.indexOf(cap);
-    return idx >= 0;
+  ssize_t idx = mCapabilities.indexOf(cap);
+  return idx >= 0;
 }
 
 status_t EmulatedFakeCamera3::constructStaticInfo(
-    const cvd::CameraDefinition& params) {
+    const cvd::CameraDefinition &params) {
+  CameraMetadata info;
+  Vector<int32_t> availableCharacteristicsKeys;
+  status_t res;
 
-    CameraMetadata info;
-    Vector<int32_t> availableCharacteristicsKeys;
-    status_t res;
+  char *param_end;
+  int32_t width = 0, height = 0;
 
-    char* param_end;
-    int32_t width = 0, height = 0;
-
-    /* TODO(ender): this currently supports only maximum resolution. */
-    for (size_t index = 0; index < params.resolutions.size(); ++index) {
-        if (width <= params.resolutions[index].width &&
-            height <= params.resolutions[index].height) {
-            width = params.resolutions[index].width;
-            height = params.resolutions[index].height;
-        }
+  /* TODO(ender): this currently supports only maximum resolution. */
+  for (size_t index = 0; index < params.resolutions.size(); ++index) {
+    if (width <= params.resolutions[index].width &&
+        height <= params.resolutions[index].height) {
+      width = params.resolutions[index].width;
+      height = params.resolutions[index].height;
     }
+  }
 
-    if (width < 640 || height < 480) {
-        width = 640;
-        height = 480;
-    }
+  if (width < 640 || height < 480) {
+    width = 640;
+    height = 480;
+  }
 
-    mSensorWidth = width;
-    mSensorHeight = height;
+  mSensorWidth = width;
+  mSensorHeight = height;
 
 #define ADD_STATIC_ENTRY(name, varptr, count) \
-        availableCharacteristicsKeys.add(name);   \
-        res = info.update(name, varptr, count); \
-        if (res != OK) return res
+  availableCharacteristicsKeys.add(name);     \
+  res = info.update(name, varptr, count);     \
+  if (res != OK) return res
 
-    // android.sensor
+  // android.sensor
 
-    if (hasCapability(MANUAL_SENSOR)) {
+  if (hasCapability(MANUAL_SENSOR)) {
+    ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE,
+                     Sensor::kExposureTimeRange, 2);
 
-        ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE,
-                Sensor::kExposureTimeRange, 2);
+    ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_MAX_FRAME_DURATION,
+                     &Sensor::kFrameDurationRange[1], 1);
 
-        ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_MAX_FRAME_DURATION,
-                &Sensor::kFrameDurationRange[1], 1);
+    ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE,
+                     Sensor::kSensitivityRange,
+                     sizeof(Sensor::kSensitivityRange) / sizeof(int32_t));
 
-        ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE,
-                Sensor::kSensitivityRange,
-                sizeof(Sensor::kSensitivityRange)
-                /sizeof(int32_t));
+    ADD_STATIC_ENTRY(ANDROID_SENSOR_MAX_ANALOG_SENSITIVITY,
+                     &Sensor::kSensitivityRange[1], 1);
+  }
 
-        ADD_STATIC_ENTRY(ANDROID_SENSOR_MAX_ANALOG_SENSITIVITY,
-                &Sensor::kSensitivityRange[1], 1);
+  static const float sensorPhysicalSize[2] = {3.20f, 2.40f};  // mm
+  ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_PHYSICAL_SIZE, sensorPhysicalSize, 2);
+
+  const int32_t pixelArray[] = {mSensorWidth, mSensorHeight};
+  ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE, pixelArray, 2);
+  const int32_t activeArray[] = {0, 0, mSensorWidth, mSensorHeight};
+  ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE, activeArray, 4);
+
+  static const int32_t orientation = 90;  // Aligned with 'long edge'
+  ADD_STATIC_ENTRY(ANDROID_SENSOR_ORIENTATION, &orientation, 1);
+
+  static const uint8_t timestampSource =
+      ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_REALTIME;
+  ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE, &timestampSource, 1);
+
+  if (hasCapability(RAW)) {
+    ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
+                     &Sensor::kColorFilterArrangement, 1);
+
+    ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_WHITE_LEVEL,
+                     (int32_t *)&Sensor::kMaxRawValue, 1);
+
+    static const int32_t blackLevelPattern[4] = {
+        (int32_t)Sensor::kBlackLevel, (int32_t)Sensor::kBlackLevel,
+        (int32_t)Sensor::kBlackLevel, (int32_t)Sensor::kBlackLevel};
+    ADD_STATIC_ENTRY(ANDROID_SENSOR_BLACK_LEVEL_PATTERN, blackLevelPattern,
+                     sizeof(blackLevelPattern) / sizeof(int32_t));
+  }
+
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    static const int32_t availableTestPatternModes[] = {
+        ANDROID_SENSOR_TEST_PATTERN_MODE_OFF};
+    ADD_STATIC_ENTRY(ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES,
+                     availableTestPatternModes,
+                     sizeof(availableTestPatternModes) / sizeof(int32_t));
+  }
+
+  // android.lens
+
+  static const float focalLength = 3.30f;  // mm
+  ADD_STATIC_ENTRY(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, &focalLength, 1);
+
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    // 5 cm min focus distance for back camera, infinity (fixed focus) for front
+    const float minFocusDistance = mFacingBack ? 1.0 / 0.05 : 0.0;
+    ADD_STATIC_ENTRY(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE,
+                     &minFocusDistance, 1);
+
+    // 5 m hyperfocal distance for back camera, infinity (fixed focus) for front
+    const float hyperFocalDistance = mFacingBack ? 1.0 / 5.0 : 0.0;
+    ADD_STATIC_ENTRY(ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE, &hyperFocalDistance,
+                     1);
+
+    static const float aperture = 2.8f;
+    ADD_STATIC_ENTRY(ANDROID_LENS_INFO_AVAILABLE_APERTURES, &aperture, 1);
+    static const float filterDensity = 0;
+    ADD_STATIC_ENTRY(ANDROID_LENS_INFO_AVAILABLE_FILTER_DENSITIES,
+                     &filterDensity, 1);
+    static const uint8_t availableOpticalStabilization =
+        ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+    ADD_STATIC_ENTRY(ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
+                     &availableOpticalStabilization, 1);
+
+    static const int32_t lensShadingMapSize[] = {1, 1};
+    ADD_STATIC_ENTRY(ANDROID_LENS_INFO_SHADING_MAP_SIZE, lensShadingMapSize,
+                     sizeof(lensShadingMapSize) / sizeof(int32_t));
+
+    static const uint8_t lensFocusCalibration =
+        ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_APPROXIMATE;
+    ADD_STATIC_ENTRY(ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION,
+                     &lensFocusCalibration, 1);
+  }
+
+  if (hasCapability(DEPTH_OUTPUT)) {
+    // These could be included for non-DEPTH capabilities as well, but are
+    // kept conditional on DEPTH here to vary testing coverage
+
+    // 90 degree rotation to align with long edge of a phone device that's by
+    // default portrait
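+    // Quaternions here are stored scalar-first: (w, x, y, z).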
+    static const float qO[] = {0.707107f, 0.f, 0.f, 0.707107f};
+
+    // Either a 180-degree rotation for back-facing, or no rotation for
+    // front-facing
+    const float qF[] = {0, (mFacingBack ? 1.f : 0.f), 0,
+                        (mFacingBack ? 0.f : 1.f)};
+
+    // Quaternion product, orientation change then facing
+    const float lensPoseRotation[] = {
+        qO[0] * qF[0] - qO[1] * qF[1] - qO[2] * qF[2] - qO[3] * qF[3],
+        qO[0] * qF[1] + qO[1] * qF[0] + qO[2] * qF[3] - qO[3] * qF[2],
+        qO[0] * qF[2] + qO[2] * qF[0] + qO[1] * qF[3] - qO[3] * qF[1],
+        qO[0] * qF[3] + qO[3] * qF[0] + qO[1] * qF[2] - qO[2] * qF[1]};
+
+    ADD_STATIC_ENTRY(ANDROID_LENS_POSE_ROTATION, lensPoseRotation,
+                     sizeof(lensPoseRotation) / sizeof(float));
+
+    // Only one camera facing each way, so 0 translation needed to the center of
+    // the 'main' camera
+    static const float lensPoseTranslation[] = {0.f, 0.f, 0.f};
+
+    ADD_STATIC_ENTRY(ANDROID_LENS_POSE_TRANSLATION, lensPoseTranslation,
+                     sizeof(lensPoseTranslation) / sizeof(float));
+
+    // Intrinsics are 'ideal' (f_x, f_y, c_x, c_y, s) match focal length and
+    // active array size
+    float f_x = focalLength * mSensorWidth / sensorPhysicalSize[0];
+    float f_y = focalLength * mSensorHeight / sensorPhysicalSize[1];
+    float c_x = mSensorWidth / 2.f;
+    float c_y = mSensorHeight / 2.f;
+    float s = 0.f;
+    const float lensIntrinsics[] = {f_x, f_y, c_x, c_y, s};
+
+    ADD_STATIC_ENTRY(ANDROID_LENS_INTRINSIC_CALIBRATION, lensIntrinsics,
+                     sizeof(lensIntrinsics) / sizeof(float));
+
+    // No radial or tangential distortion
+
+    float lensRadialDistortion[] = {1.0f, 0.f, 0.f, 0.f, 0.f, 0.f};
+
+    ADD_STATIC_ENTRY(ANDROID_LENS_RADIAL_DISTORTION, lensRadialDistortion,
+                     sizeof(lensRadialDistortion) / sizeof(float));
+  }
+
+  const uint8_t lensFacing =
+      mFacingBack ? ANDROID_LENS_FACING_BACK : ANDROID_LENS_FACING_FRONT;
+  ADD_STATIC_ENTRY(ANDROID_LENS_FACING, &lensFacing, 1);
+
+  // android.flash
+
+  const uint8_t flashAvailable = mFacingBack;
+  ADD_STATIC_ENTRY(ANDROID_FLASH_INFO_AVAILABLE, &flashAvailable, 1);
+
+  // android.tonemap
+
+  if (hasCapability(MANUAL_POST_PROCESSING)) {
+    static const int32_t tonemapCurvePoints = 128;
+    ADD_STATIC_ENTRY(ANDROID_TONEMAP_MAX_CURVE_POINTS, &tonemapCurvePoints, 1);
+
+    static const uint8_t availableToneMapModes[] = {
+        ANDROID_TONEMAP_MODE_CONTRAST_CURVE, ANDROID_TONEMAP_MODE_FAST,
+        ANDROID_TONEMAP_MODE_HIGH_QUALITY};
+    ADD_STATIC_ENTRY(ANDROID_TONEMAP_AVAILABLE_TONE_MAP_MODES,
+                     availableToneMapModes, sizeof(availableToneMapModes));
+  }
+
+  // android.scaler
+
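+  // Each entry below is a flat (format, width, height, direction) quadruple,
+  // the layout ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS expects.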
+  const std::vector<int32_t> availableStreamConfigurationsBasic = {
+      HAL_PIXEL_FORMAT_BLOB,
+      width,
+      height,
+      ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+      HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+      320,
+      240,
+      ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+      HAL_PIXEL_FORMAT_YCbCr_420_888,
+      320,
+      240,
+      ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+      HAL_PIXEL_FORMAT_BLOB,
+      320,
+      240,
+      ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+  };
+
+  // Always need to include 640x480 in basic formats
+  const std::vector<int32_t> availableStreamConfigurationsBasic640 = {
+      HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+      640,
+      480,
+      ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+      HAL_PIXEL_FORMAT_YCbCr_420_888,
+      640,
+      480,
+      ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+      HAL_PIXEL_FORMAT_BLOB,
+      640,
+      480,
+      ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT};
+
+  const std::vector<int32_t> availableStreamConfigurationsRaw = {
+      HAL_PIXEL_FORMAT_RAW16,
+      width,
+      height,
+      ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+  };
+
+  const std::vector<int32_t> availableStreamConfigurationsBurst = {
+      HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+      width,
+      height,
+      ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+      HAL_PIXEL_FORMAT_YCbCr_420_888,
+      width,
+      height,
+      ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+      HAL_PIXEL_FORMAT_RGBA_8888,
+      width,
+      height,
+      ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+  };
+
+  std::vector<int32_t> availableStreamConfigurations;
+
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    availableStreamConfigurations.insert(
+        availableStreamConfigurations.end(),
+        availableStreamConfigurationsBasic.begin(),
+        availableStreamConfigurationsBasic.end());
+    if (width > 640) {
+      availableStreamConfigurations.insert(
+          availableStreamConfigurations.end(),
+          availableStreamConfigurationsBasic640.begin(),
+          availableStreamConfigurationsBasic640.end());
     }
+  }
+  if (hasCapability(RAW)) {
+    availableStreamConfigurations.insert(
+        availableStreamConfigurations.end(),
+        availableStreamConfigurationsRaw.begin(),
+        availableStreamConfigurationsRaw.end());
+  }
+  if (hasCapability(BURST_CAPTURE)) {
+    availableStreamConfigurations.insert(
+        availableStreamConfigurations.end(),
+        availableStreamConfigurationsBurst.begin(),
+        availableStreamConfigurationsBurst.end());
+  }
 
-    static const float sensorPhysicalSize[2] = {3.20f, 2.40f}; // mm
-    ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
-            sensorPhysicalSize, 2);
+  if (availableStreamConfigurations.size() > 0) {
+    ADD_STATIC_ENTRY(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
+                     &availableStreamConfigurations[0],
+                     availableStreamConfigurations.size());
+  }
 
-    const int32_t pixelArray[] = {mSensorWidth, mSensorHeight};
-    ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
-            pixelArray, 2);
-    const int32_t activeArray[] = {0, 0, mSensorWidth, mSensorHeight};
-    ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
-            activeArray, 4);
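+  // Duration entries are flat (format, width, height, min_frame_duration_ns)
+  // quadruples, mirroring the stream configuration layout above.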
+  const std::vector<int64_t> availableMinFrameDurationsBasic = {
+      HAL_PIXEL_FORMAT_BLOB,
+      width,
+      height,
+      Sensor::kFrameDurationRange[0],
+      HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+      320,
+      240,
+      Sensor::kFrameDurationRange[0],
+      HAL_PIXEL_FORMAT_YCbCr_420_888,
+      320,
+      240,
+      Sensor::kFrameDurationRange[0],
+      HAL_PIXEL_FORMAT_BLOB,
+      320,
+      240,
+      Sensor::kFrameDurationRange[0],
+  };
 
-    static const int32_t orientation = 90; // Aligned with 'long edge'
-    ADD_STATIC_ENTRY(ANDROID_SENSOR_ORIENTATION, &orientation, 1);
+  // Always need to include 640x480 in basic formats
+  const std::vector<int64_t> availableMinFrameDurationsBasic640 = {
+      HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+      640,
+      480,
+      Sensor::kFrameDurationRange[0],
+      HAL_PIXEL_FORMAT_YCbCr_420_888,
+      640,
+      480,
+      Sensor::kFrameDurationRange[0],
+      HAL_PIXEL_FORMAT_BLOB,
+      640,
+      480,
+      Sensor::kFrameDurationRange[0]};
 
-    static const uint8_t timestampSource = ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_REALTIME;
-    ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE, &timestampSource, 1);
+  const std::vector<int64_t> availableMinFrameDurationsRaw = {
+      HAL_PIXEL_FORMAT_RAW16,
+      width,
+      height,
+      Sensor::kFrameDurationRange[0],
+  };
 
-    if (hasCapability(RAW)) {
-        ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
-                &Sensor::kColorFilterArrangement, 1);
+  const std::vector<int64_t> availableMinFrameDurationsBurst = {
+      HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+      width,
+      height,
+      Sensor::kFrameDurationRange[0],
+      HAL_PIXEL_FORMAT_YCbCr_420_888,
+      width,
+      height,
+      Sensor::kFrameDurationRange[0],
+      HAL_PIXEL_FORMAT_RGBA_8888,
+      width,
+      height,
+      Sensor::kFrameDurationRange[0],
+  };
 
-        ADD_STATIC_ENTRY(ANDROID_SENSOR_INFO_WHITE_LEVEL,
-                (int32_t*)&Sensor::kMaxRawValue, 1);
+  std::vector<int64_t> availableMinFrameDurations;
 
-        static const int32_t blackLevelPattern[4] = {
-            (int32_t)Sensor::kBlackLevel, (int32_t)Sensor::kBlackLevel,
-            (int32_t)Sensor::kBlackLevel, (int32_t)Sensor::kBlackLevel
-        };
-        ADD_STATIC_ENTRY(ANDROID_SENSOR_BLACK_LEVEL_PATTERN,
-                blackLevelPattern, sizeof(blackLevelPattern)/sizeof(int32_t));
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    availableMinFrameDurations.insert(availableMinFrameDurations.end(),
+                                      availableMinFrameDurationsBasic.begin(),
+                                      availableMinFrameDurationsBasic.end());
+    if (width > 640) {
+      availableMinFrameDurations.insert(
+          availableMinFrameDurations.end(),
+          availableMinFrameDurationsBasic640.begin(),
+          availableMinFrameDurationsBasic640.end());
     }
+  }
+  if (hasCapability(RAW)) {
+    availableMinFrameDurations.insert(availableMinFrameDurations.end(),
+                                      availableMinFrameDurationsRaw.begin(),
+                                      availableMinFrameDurationsRaw.end());
+  }
+  if (hasCapability(BURST_CAPTURE)) {
+    availableMinFrameDurations.insert(availableMinFrameDurations.end(),
+                                      availableMinFrameDurationsBurst.begin(),
+                                      availableMinFrameDurationsBurst.end());
+  }
 
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        static const int32_t availableTestPatternModes[] = {
-            ANDROID_SENSOR_TEST_PATTERN_MODE_OFF
-        };
-        ADD_STATIC_ENTRY(ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES,
-                availableTestPatternModes, sizeof(availableTestPatternModes)/sizeof(int32_t));
+  if (availableMinFrameDurations.size() > 0) {
+    ADD_STATIC_ENTRY(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
+                     &availableMinFrameDurations[0],
+                     availableMinFrameDurations.size());
+  }
+
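+  // Stall entries are flat (format, width, height, stall_duration_ns)
+  // quadruples; only the BLOB (JPEG) and RAW16 outputs report a nonzero stall.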
+  const std::vector<int64_t> availableStallDurationsBasic = {
+      HAL_PIXEL_FORMAT_BLOB,
+      width,
+      height,
+      Sensor::kFrameDurationRange[0],
+      HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+      320,
+      240,
+      0,
+      HAL_PIXEL_FORMAT_YCbCr_420_888,
+      320,
+      240,
+      0,
+      HAL_PIXEL_FORMAT_RGBA_8888,
+      320,
+      240,
+      0,
+  };
+
+  // Always need to include 640x480 in basic formats
+  const std::vector<int64_t> availableStallDurationsBasic640 = {
+      HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+      640,
+      480,
+      0,
+      HAL_PIXEL_FORMAT_YCbCr_420_888,
+      640,
+      480,
+      0,
+      HAL_PIXEL_FORMAT_BLOB,
+      640,
+      480,
+      Sensor::kFrameDurationRange[0]};
+
+  const std::vector<int64_t> availableStallDurationsRaw = {
+      HAL_PIXEL_FORMAT_RAW16, width, height, Sensor::kFrameDurationRange[0]};
+  const std::vector<int64_t> availableStallDurationsBurst = {
+      HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+      width,
+      height,
+      0,
+      HAL_PIXEL_FORMAT_YCbCr_420_888,
+      width,
+      height,
+      0,
+      HAL_PIXEL_FORMAT_RGBA_8888,
+      width,
+      height,
+      0};
+
+  std::vector<int64_t> availableStallDurations;
+
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    availableStallDurations.insert(availableStallDurations.end(),
+                                   availableStallDurationsBasic.begin(),
+                                   availableStallDurationsBasic.end());
+    if (width > 640) {
+      availableStallDurations.insert(availableStallDurations.end(),
+                                     availableStallDurationsBasic640.begin(),
+                                     availableStallDurationsBasic640.end());
     }
+  }
+  if (hasCapability(RAW)) {
+    availableStallDurations.insert(availableStallDurations.end(),
+                                   availableStallDurationsRaw.begin(),
+                                   availableStallDurationsRaw.end());
+  }
+  if (hasCapability(BURST_CAPTURE)) {
+    availableStallDurations.insert(availableStallDurations.end(),
+                                   availableStallDurationsBurst.begin(),
+                                   availableStallDurationsBurst.end());
+  }
 
-    // android.lens
+  if (availableStallDurations.size() > 0) {
+    ADD_STATIC_ENTRY(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
+                     &availableStallDurations[0],
+                     availableStallDurations.size());
+  }
 
-    static const float focalLength = 3.30f; // mm
-    ADD_STATIC_ENTRY(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
-            &focalLength, 1);
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    static const uint8_t croppingType = ANDROID_SCALER_CROPPING_TYPE_FREEFORM;
+    ADD_STATIC_ENTRY(ANDROID_SCALER_CROPPING_TYPE, &croppingType, 1);
 
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        // 5 cm min focus distance for back camera, infinity (fixed focus) for front
-        const float minFocusDistance = mFacingBack ? 1.0/0.05 : 0.0;
-        ADD_STATIC_ENTRY(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE,
-                &minFocusDistance, 1);
+    static const float maxZoom = 10;
+    ADD_STATIC_ENTRY(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM, &maxZoom, 1);
+  }
 
-        // 5 m hyperfocal distance for back camera, infinity (fixed focus) for front
-        const float hyperFocalDistance = mFacingBack ? 1.0/5.0 : 0.0;
-        ADD_STATIC_ENTRY(ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE,
-                &hyperFocalDistance, 1);
+  // android.jpeg
 
-        static const float aperture = 2.8f;
-        ADD_STATIC_ENTRY(ANDROID_LENS_INFO_AVAILABLE_APERTURES,
-                &aperture, 1);
-        static const float filterDensity = 0;
-        ADD_STATIC_ENTRY(ANDROID_LENS_INFO_AVAILABLE_FILTER_DENSITIES,
-                &filterDensity, 1);
-        static const uint8_t availableOpticalStabilization =
-                ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
-        ADD_STATIC_ENTRY(ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
-                &availableOpticalStabilization, 1);
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    static const int32_t jpegThumbnailSizes[] = {0, 0, 160, 120, 320, 240};
+    ADD_STATIC_ENTRY(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES, jpegThumbnailSizes,
+                     sizeof(jpegThumbnailSizes) / sizeof(int32_t));
 
-        static const int32_t lensShadingMapSize[] = {1, 1};
-        ADD_STATIC_ENTRY(ANDROID_LENS_INFO_SHADING_MAP_SIZE, lensShadingMapSize,
-                sizeof(lensShadingMapSize)/sizeof(int32_t));
+    static const int32_t jpegMaxSize = JpegCompressor::kMaxJpegSize;
+    ADD_STATIC_ENTRY(ANDROID_JPEG_MAX_SIZE, &jpegMaxSize, 1);
+  }
 
-        static const uint8_t lensFocusCalibration =
-                ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_APPROXIMATE;
-        ADD_STATIC_ENTRY(ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION, &lensFocusCalibration, 1);
-    }
+  // android.stats
 
-    if (hasCapability(DEPTH_OUTPUT)) {
-        // These could be included for non-DEPTH capability as well, but making this variable for
-        // testing coverage
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    static const uint8_t availableFaceDetectModes[] = {
+        ANDROID_STATISTICS_FACE_DETECT_MODE_OFF,
+        ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE,
+        ANDROID_STATISTICS_FACE_DETECT_MODE_FULL};
+    ADD_STATIC_ENTRY(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
+                     availableFaceDetectModes,
+                     sizeof(availableFaceDetectModes));
 
-        // 90 degree rotation to align with long edge of a phone device that's by default portrait
-        static const float qO[] = { 0.707107f, 0.f, 0.f, 0.707107f};
+    static const int32_t maxFaceCount = 8;
+    ADD_STATIC_ENTRY(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT, &maxFaceCount, 1);
 
-        // Either a 180-degree rotation for back-facing, or no rotation for front-facing
-        const float qF[] = {0, (mFacingBack ? 1.f : 0.f), 0, (mFacingBack ? 0.f : 1.f)};
+    static const uint8_t availableShadingMapModes[] = {
+        ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF};
+    ADD_STATIC_ENTRY(ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES,
+                     availableShadingMapModes,
+                     sizeof(availableShadingMapModes));
+  }
 
-        // Quarternion product, orientation change then facing
-        const float lensPoseRotation[] = {qO[0]*qF[0] - qO[1]*qF[1] - qO[2]*qF[2] - qO[3]*qF[3],
-                                          qO[0]*qF[1] + qO[1]*qF[0] + qO[2]*qF[3] - qO[3]*qF[2],
-                                          qO[0]*qF[2] + qO[2]*qF[0] + qO[1]*qF[3] - qO[3]*qF[1],
-                                          qO[0]*qF[3] + qO[3]*qF[0] + qO[1]*qF[2] - qO[2]*qF[1]};
+  // android.sync
 
-        ADD_STATIC_ENTRY(ANDROID_LENS_POSE_ROTATION, lensPoseRotation,
-                sizeof(lensPoseRotation)/sizeof(float));
+  static const int32_t maxLatency =
+      hasCapability(FULL_LEVEL) ? ANDROID_SYNC_MAX_LATENCY_PER_FRAME_CONTROL
+                                : 3;
+  ADD_STATIC_ENTRY(ANDROID_SYNC_MAX_LATENCY, &maxLatency, 1);
 
-        // Only one camera facing each way, so 0 translation needed to the center of the 'main'
-        // camera
-        static const float lensPoseTranslation[] = {0.f, 0.f, 0.f};
+  // android.control
 
-        ADD_STATIC_ENTRY(ANDROID_LENS_POSE_TRANSLATION, lensPoseTranslation,
-                sizeof(lensPoseTranslation)/sizeof(float));
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    static const uint8_t availableControlModes[] = {
+        ANDROID_CONTROL_MODE_OFF, ANDROID_CONTROL_MODE_AUTO,
+        ANDROID_CONTROL_MODE_USE_SCENE_MODE};
+    ADD_STATIC_ENTRY(ANDROID_CONTROL_AVAILABLE_MODES, availableControlModes,
+                     sizeof(availableControlModes));
+  } else {
+    static const uint8_t availableControlModes[] = {ANDROID_CONTROL_MODE_AUTO};
+    ADD_STATIC_ENTRY(ANDROID_CONTROL_AVAILABLE_MODES, availableControlModes,
+                     sizeof(availableControlModes));
+  }
 
-        // Intrinsics are 'ideal' (f_x, f_y, c_x, c_y, s) match focal length and active array size
-        float f_x = focalLength * mSensorWidth / sensorPhysicalSize[0];
-        float f_y = focalLength * mSensorHeight / sensorPhysicalSize[1];
-        float c_x = mSensorWidth / 2.f;
-        float c_y = mSensorHeight / 2.f;
-        float s = 0.f;
-        const float lensIntrinsics[] = { f_x, f_y, c_x, c_y, s };
+  static const uint8_t availableSceneModes[] = {
+      hasCapability(BACKWARD_COMPATIBLE)
+          ? ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY
+          : ANDROID_CONTROL_SCENE_MODE_DISABLED};
+  ADD_STATIC_ENTRY(ANDROID_CONTROL_AVAILABLE_SCENE_MODES, availableSceneModes,
+                   sizeof(availableSceneModes));
 
-        ADD_STATIC_ENTRY(ANDROID_LENS_INTRINSIC_CALIBRATION, lensIntrinsics,
-                sizeof(lensIntrinsics)/sizeof(float));
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    static const uint8_t availableEffects[] = {ANDROID_CONTROL_EFFECT_MODE_OFF};
+    ADD_STATIC_ENTRY(ANDROID_CONTROL_AVAILABLE_EFFECTS, availableEffects,
+                     sizeof(availableEffects));
+  }
 
-        // No radial or tangential distortion
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    static const int32_t max3aRegions[] = {/*AE*/ 1, /*AWB*/ 0, /*AF*/ 1};
+    ADD_STATIC_ENTRY(ANDROID_CONTROL_MAX_REGIONS, max3aRegions,
+                     sizeof(max3aRegions) / sizeof(max3aRegions[0]));
 
-        float lensRadialDistortion[] = {1.0f, 0.f, 0.f, 0.f, 0.f, 0.f};
+    static const uint8_t availableAeModes[] = {ANDROID_CONTROL_AE_MODE_OFF,
+                                               ANDROID_CONTROL_AE_MODE_ON};
+    ADD_STATIC_ENTRY(ANDROID_CONTROL_AE_AVAILABLE_MODES, availableAeModes,
+                     sizeof(availableAeModes));
 
-        ADD_STATIC_ENTRY(ANDROID_LENS_RADIAL_DISTORTION, lensRadialDistortion,
-                sizeof(lensRadialDistortion)/sizeof(float));
+    static const camera_metadata_rational exposureCompensationStep = {1, 3};
+    ADD_STATIC_ENTRY(ANDROID_CONTROL_AE_COMPENSATION_STEP,
+                     &exposureCompensationStep, 1);
 
-    }
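+    // A 1/3 EV step across the [-9, 9] range gives +/-3 EV of compensation.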
+    int32_t exposureCompensationRange[] = {-9, 9};
+    ADD_STATIC_ENTRY(ANDROID_CONTROL_AE_COMPENSATION_RANGE,
+                     exposureCompensationRange,
+                     sizeof(exposureCompensationRange) / sizeof(int32_t));
+  }
 
+  static const int32_t availableTargetFpsRanges[] = {5,  30, 15, 30,
+                                                     15, 15, 30, 30};
+  ADD_STATIC_ENTRY(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
+                   availableTargetFpsRanges,
+                   sizeof(availableTargetFpsRanges) / sizeof(int32_t));
 
-    const uint8_t lensFacing = mFacingBack ?
-            ANDROID_LENS_FACING_BACK : ANDROID_LENS_FACING_FRONT;
-    ADD_STATIC_ENTRY(ANDROID_LENS_FACING, &lensFacing, 1);
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    static const uint8_t availableAntibandingModes[] = {
+        ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF,
+        ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO};
+    ADD_STATIC_ENTRY(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
+                     availableAntibandingModes,
+                     sizeof(availableAntibandingModes));
+  }
 
-    // android.flash
+  static const uint8_t aeLockAvailable =
+      hasCapability(BACKWARD_COMPATIBLE)
+          ? ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE
+          : ANDROID_CONTROL_AE_LOCK_AVAILABLE_FALSE;
 
-    const uint8_t flashAvailable = mFacingBack;
-    ADD_STATIC_ENTRY(ANDROID_FLASH_INFO_AVAILABLE, &flashAvailable, 1);
+  ADD_STATIC_ENTRY(ANDROID_CONTROL_AE_LOCK_AVAILABLE, &aeLockAvailable, 1);
 
-    // android.tonemap
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    static const uint8_t availableAwbModes[] = {
+        ANDROID_CONTROL_AWB_MODE_OFF,
+        ANDROID_CONTROL_AWB_MODE_AUTO,
+        ANDROID_CONTROL_AWB_MODE_INCANDESCENT,
+        ANDROID_CONTROL_AWB_MODE_FLUORESCENT,
+        ANDROID_CONTROL_AWB_MODE_DAYLIGHT,
+        ANDROID_CONTROL_AWB_MODE_SHADE};
+    ADD_STATIC_ENTRY(ANDROID_CONTROL_AWB_AVAILABLE_MODES, availableAwbModes,
+                     sizeof(availableAwbModes));
+  }
 
-    if (hasCapability(MANUAL_POST_PROCESSING)) {
-        static const int32_t tonemapCurvePoints = 128;
-        ADD_STATIC_ENTRY(ANDROID_TONEMAP_MAX_CURVE_POINTS, &tonemapCurvePoints, 1);
+  static const uint8_t awbLockAvailable =
+      hasCapability(BACKWARD_COMPATIBLE)
+          ? ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE
+          : ANDROID_CONTROL_AWB_LOCK_AVAILABLE_FALSE;
 
-        static const uint8_t availableToneMapModes[] = {
-            ANDROID_TONEMAP_MODE_CONTRAST_CURVE,  ANDROID_TONEMAP_MODE_FAST,
-            ANDROID_TONEMAP_MODE_HIGH_QUALITY
-        };
-        ADD_STATIC_ENTRY(ANDROID_TONEMAP_AVAILABLE_TONE_MAP_MODES, availableToneMapModes,
-                sizeof(availableToneMapModes));
-    }
+  ADD_STATIC_ENTRY(ANDROID_CONTROL_AWB_LOCK_AVAILABLE, &awbLockAvailable, 1);
 
-    // android.scaler
+  static const uint8_t availableAfModesBack[] = {
+      ANDROID_CONTROL_AF_MODE_OFF, ANDROID_CONTROL_AF_MODE_AUTO,
+      ANDROID_CONTROL_AF_MODE_MACRO, ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO,
+      ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE};
 
-    const std::vector<int32_t> availableStreamConfigurationsBasic = {
-        HAL_PIXEL_FORMAT_BLOB, width, height, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
-        HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 320, 240, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
-        HAL_PIXEL_FORMAT_YCbCr_420_888, 320, 240, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
-        HAL_PIXEL_FORMAT_BLOB, 320, 240, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
+  static const uint8_t availableAfModesFront[] = {ANDROID_CONTROL_AF_MODE_OFF};
+
+  if (mFacingBack && hasCapability(BACKWARD_COMPATIBLE)) {
+    ADD_STATIC_ENTRY(ANDROID_CONTROL_AF_AVAILABLE_MODES, availableAfModesBack,
+                     sizeof(availableAfModesBack));
+  } else {
+    ADD_STATIC_ENTRY(ANDROID_CONTROL_AF_AVAILABLE_MODES, availableAfModesFront,
+                     sizeof(availableAfModesFront));
+  }
+
+  static const uint8_t availableVstabModes[] = {
+      ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF};
+  ADD_STATIC_ENTRY(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
+                   availableVstabModes, sizeof(availableVstabModes));
+
+  // android.colorCorrection
+
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    static const uint8_t availableAberrationModes[] = {
+        ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF,
+        ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST,
+        ANDROID_COLOR_CORRECTION_ABERRATION_MODE_HIGH_QUALITY};
+    ADD_STATIC_ENTRY(ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES,
+                     availableAberrationModes,
+                     sizeof(availableAberrationModes));
+  } else {
+    static const uint8_t availableAberrationModes[] = {
+        ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF,
     };
+    ADD_STATIC_ENTRY(ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES,
+                     availableAberrationModes,
+                     sizeof(availableAberrationModes));
+  }
+  // android.edge
 
-    // Always need to include 640x480 in basic formats
-    const std::vector<int32_t> availableStreamConfigurationsBasic640 = {
-        HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 640, 480, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
-        HAL_PIXEL_FORMAT_YCbCr_420_888, 640, 480, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
-        HAL_PIXEL_FORMAT_BLOB, 640, 480, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    static const uint8_t availableEdgeModes[] = {
+        ANDROID_EDGE_MODE_OFF, ANDROID_EDGE_MODE_FAST,
+        ANDROID_EDGE_MODE_HIGH_QUALITY};
+    ADD_STATIC_ENTRY(ANDROID_EDGE_AVAILABLE_EDGE_MODES, availableEdgeModes,
+                     sizeof(availableEdgeModes));
+  } else {
+    static const uint8_t availableEdgeModes[] = {ANDROID_EDGE_MODE_OFF};
+    ADD_STATIC_ENTRY(ANDROID_EDGE_AVAILABLE_EDGE_MODES, availableEdgeModes,
+                     sizeof(availableEdgeModes));
+  }
+
+  // android.info
+
+  static const uint8_t supportedHardwareLevel =
+      hasCapability(FULL_LEVEL) ? ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_FULL
+                                : ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED;
+  ADD_STATIC_ENTRY(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL,
+                   &supportedHardwareLevel,
+                   /*count*/ 1);
+
+  // android.noiseReduction
+
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    static const uint8_t availableNoiseReductionModes[] = {
+        ANDROID_NOISE_REDUCTION_MODE_OFF, ANDROID_NOISE_REDUCTION_MODE_FAST,
+        ANDROID_NOISE_REDUCTION_MODE_HIGH_QUALITY};
+    ADD_STATIC_ENTRY(ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES,
+                     availableNoiseReductionModes,
+                     sizeof(availableNoiseReductionModes));
+  } else {
+    static const uint8_t availableNoiseReductionModes[] = {
+        ANDROID_NOISE_REDUCTION_MODE_OFF,
     };
+    ADD_STATIC_ENTRY(ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES,
+                     availableNoiseReductionModes,
+                     sizeof(availableNoiseReductionModes));
+  }
 
-    const std::vector<int32_t> availableStreamConfigurationsRaw = {
-        HAL_PIXEL_FORMAT_RAW16, width, height, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
-    };
+  // android.depth
 
-    const std::vector<int32_t> availableStreamConfigurationsBurst = {
-        HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, width, height, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
-        HAL_PIXEL_FORMAT_YCbCr_420_888, width, height, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
-        HAL_PIXEL_FORMAT_RGBA_8888, width, height, ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT,
-    };
+  if (hasCapability(DEPTH_OUTPUT)) {
+    static const int32_t maxDepthSamples = 100;
+    ADD_STATIC_ENTRY(ANDROID_DEPTH_MAX_DEPTH_SAMPLES, &maxDepthSamples, 1);
 
-    std::vector<int32_t> availableStreamConfigurations;
+    static const int32_t availableDepthStreamConfigurations[] = {
+        HAL_PIXEL_FORMAT_Y16,
+        160,
+        120,
+        ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS_OUTPUT,
+        HAL_PIXEL_FORMAT_BLOB,
+        maxDepthSamples,
+        1,
+        ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS_OUTPUT};
+    ADD_STATIC_ENTRY(
+        ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS,
+        availableDepthStreamConfigurations,
+        sizeof(availableDepthStreamConfigurations) / sizeof(int32_t));
 
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        availableStreamConfigurations.insert(availableStreamConfigurations.end(),
-                availableStreamConfigurationsBasic.begin(),
-                availableStreamConfigurationsBasic.end());
-        if (width > 640) {
-            availableStreamConfigurations.insert(availableStreamConfigurations.end(),
-                    availableStreamConfigurationsBasic640.begin(),
-                    availableStreamConfigurationsBasic640.end());
-        }
+    static const int64_t availableDepthMinFrameDurations[] = {
+        HAL_PIXEL_FORMAT_Y16,
+        160,
+        120,
+        Sensor::kFrameDurationRange[0],
+        HAL_PIXEL_FORMAT_BLOB,
+        maxDepthSamples,
+        1,
+        Sensor::kFrameDurationRange[0]};
+    ADD_STATIC_ENTRY(ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS,
+                     availableDepthMinFrameDurations,
+                     sizeof(availableDepthMinFrameDurations) / sizeof(int64_t));
+
+    static const int64_t availableDepthStallDurations[] = {
+        HAL_PIXEL_FORMAT_Y16,
+        160,
+        120,
+        Sensor::kFrameDurationRange[0],
+        HAL_PIXEL_FORMAT_BLOB,
+        maxDepthSamples,
+        1,
+        Sensor::kFrameDurationRange[0]};
+    ADD_STATIC_ENTRY(ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS,
+                     availableDepthStallDurations,
+                     sizeof(availableDepthStallDurations) / sizeof(int64_t));
+
+    uint8_t depthIsExclusive = ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE_FALSE;
+    ADD_STATIC_ENTRY(ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE, &depthIsExclusive, 1);
+  }
+
+  // android.shading
+
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    static const uint8_t availableShadingModes[] = {
+        ANDROID_SHADING_MODE_OFF, ANDROID_SHADING_MODE_FAST,
+        ANDROID_SHADING_MODE_HIGH_QUALITY};
+    ADD_STATIC_ENTRY(ANDROID_SHADING_AVAILABLE_MODES, availableShadingModes,
+                     sizeof(availableShadingModes));
+  } else {
+    static const uint8_t availableShadingModes[] = {ANDROID_SHADING_MODE_OFF};
+    ADD_STATIC_ENTRY(ANDROID_SHADING_AVAILABLE_MODES, availableShadingModes,
+                     sizeof(availableShadingModes));
+  }
+
+  // android.request
+
+  static const int32_t maxNumOutputStreams[] = {
+      kMaxRawStreamCount, kMaxProcessedStreamCount, kMaxJpegStreamCount};
+  ADD_STATIC_ENTRY(ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS, maxNumOutputStreams,
+                   3);
+
+  static const uint8_t maxPipelineDepth = kMaxBufferCount;
+  ADD_STATIC_ENTRY(ANDROID_REQUEST_PIPELINE_MAX_DEPTH, &maxPipelineDepth, 1);
+
+  static const int32_t partialResultCount = 1;
+  ADD_STATIC_ENTRY(ANDROID_REQUEST_PARTIAL_RESULT_COUNT, &partialResultCount,
+                   /*count*/ 1);
+
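+  // Translate the internal AvailableCapabilities values into the framework's
+  // ANDROID_REQUEST_AVAILABLE_CAPABILITIES_* constants; level markers are
+  // skipped by the default case.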
+  SortedVector<uint8_t> caps;
+  for (size_t i = 0; i < mCapabilities.size(); i++) {
+    switch (mCapabilities[i]) {
+      case BACKWARD_COMPATIBLE:
+        caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE);
+        break;
+      case MANUAL_SENSOR:
+        caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR);
+        break;
+      case MANUAL_POST_PROCESSING:
+        caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_POST_PROCESSING);
+        break;
+      case RAW:
+        caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_RAW);
+        break;
+      case PRIVATE_REPROCESSING:
+        caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_PRIVATE_REPROCESSING);
+        break;
+      case READ_SENSOR_SETTINGS:
+        caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_READ_SENSOR_SETTINGS);
+        break;
+      case BURST_CAPTURE:
+        caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE);
+        break;
+      case YUV_REPROCESSING:
+        caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_YUV_REPROCESSING);
+        break;
+      case DEPTH_OUTPUT:
+        caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT);
+        break;
+      case CONSTRAINED_HIGH_SPEED_VIDEO:
+        caps.add(
+            ANDROID_REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO);
+        break;
+      default:
+        // Ignore LEVELs
+        break;
     }
-    if (hasCapability(RAW)) {
-        availableStreamConfigurations.insert(availableStreamConfigurations.end(),
-                availableStreamConfigurationsRaw.begin(),
-                availableStreamConfigurationsRaw.end());
-    }
-    if (hasCapability(BURST_CAPTURE)) {
-        availableStreamConfigurations.insert(availableStreamConfigurations.end(),
-                availableStreamConfigurationsBurst.begin(),
-                availableStreamConfigurationsBurst.end());
-    }
+  }
+  ADD_STATIC_ENTRY(ANDROID_REQUEST_AVAILABLE_CAPABILITIES, caps.array(),
+                   caps.size());
 
-    if (availableStreamConfigurations.size() > 0) {
-        ADD_STATIC_ENTRY(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
-                &availableStreamConfigurations[0],
-                availableStreamConfigurations.size());
-    }
+  // Scan a default request template for included request keys
+  Vector<int32_t> availableRequestKeys;
+  const camera_metadata_t *previewRequest =
+      constructDefaultRequestSettings(CAMERA3_TEMPLATE_PREVIEW);
+  for (size_t i = 0; i < get_camera_metadata_entry_count(previewRequest); i++) {
+    camera_metadata_ro_entry_t entry;
+    get_camera_metadata_ro_entry(previewRequest, i, &entry);
+    availableRequestKeys.add(entry.tag);
+  }
+  ADD_STATIC_ENTRY(ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS,
+                   availableRequestKeys.array(), availableRequestKeys.size());
 
-    const std::vector<int64_t> availableMinFrameDurationsBasic = {
-        HAL_PIXEL_FORMAT_BLOB, width, height, Sensor::kFrameDurationRange[0],
-        HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 320, 240, Sensor::kFrameDurationRange[0],
-        HAL_PIXEL_FORMAT_YCbCr_420_888, 320, 240, Sensor::kFrameDurationRange[0],
-        HAL_PIXEL_FORMAT_BLOB, 320, 240, Sensor::kFrameDurationRange[0],
-    };
+  // Add a few more result keys. Must be kept up to date with the various places
+  // that add these
 
-    // Always need to include 640x480 in basic formats
-    const std::vector<int64_t> availableMinFrameDurationsBasic640 = {
-        HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 640, 480, Sensor::kFrameDurationRange[0],
-        HAL_PIXEL_FORMAT_YCbCr_420_888, 640, 480, Sensor::kFrameDurationRange[0],
-        HAL_PIXEL_FORMAT_BLOB, 640, 480, Sensor::kFrameDurationRange[0]
-    };
+  Vector<int32_t> availableResultKeys(availableRequestKeys);
+  if (hasCapability(BACKWARD_COMPATIBLE)) {
+    availableResultKeys.add(ANDROID_CONTROL_AE_STATE);
+    availableResultKeys.add(ANDROID_CONTROL_AF_STATE);
+    availableResultKeys.add(ANDROID_CONTROL_AWB_STATE);
+    availableResultKeys.add(ANDROID_FLASH_STATE);
+    availableResultKeys.add(ANDROID_LENS_STATE);
+    availableResultKeys.add(ANDROID_LENS_FOCUS_RANGE);
+    availableResultKeys.add(ANDROID_SENSOR_ROLLING_SHUTTER_SKEW);
+    availableResultKeys.add(ANDROID_STATISTICS_SCENE_FLICKER);
+  }
 
-    const std::vector<int64_t> availableMinFrameDurationsRaw = {
-        HAL_PIXEL_FORMAT_RAW16, width, height, Sensor::kFrameDurationRange[0],
-    };
+  if (hasCapability(DEPTH_OUTPUT)) {
+    availableResultKeys.add(ANDROID_LENS_POSE_ROTATION);
+    availableResultKeys.add(ANDROID_LENS_POSE_TRANSLATION);
+    availableResultKeys.add(ANDROID_LENS_INTRINSIC_CALIBRATION);
+    availableResultKeys.add(ANDROID_LENS_RADIAL_DISTORTION);
+  }
 
-    const std::vector<int64_t> availableMinFrameDurationsBurst = {
-        HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, width, height, Sensor::kFrameDurationRange[0],
-        HAL_PIXEL_FORMAT_YCbCr_420_888, width, height, Sensor::kFrameDurationRange[0],
-        HAL_PIXEL_FORMAT_RGBA_8888, width, height, Sensor::kFrameDurationRange[0],
-    };
+  availableResultKeys.add(ANDROID_REQUEST_PIPELINE_DEPTH);
+  availableResultKeys.add(ANDROID_SENSOR_TIMESTAMP);
 
-    std::vector<int64_t> availableMinFrameDurations;
+  ADD_STATIC_ENTRY(ANDROID_REQUEST_AVAILABLE_RESULT_KEYS,
+                   availableResultKeys.array(), availableResultKeys.size());
 
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        availableMinFrameDurations.insert(availableMinFrameDurations.end(),
-                availableMinFrameDurationsBasic.begin(),
-                availableMinFrameDurationsBasic.end());
-        if (width > 640) {
-            availableMinFrameDurations.insert(availableMinFrameDurations.end(),
-                    availableMinFrameDurationsBasic640.begin(),
-                    availableMinFrameDurationsBasic640.end());
-        }
-    }
-    if (hasCapability(RAW)) {
-        availableMinFrameDurations.insert(availableMinFrameDurations.end(),
-                availableMinFrameDurationsRaw.begin(),
-                availableMinFrameDurationsRaw.end());
-    }
-    if (hasCapability(BURST_CAPTURE)) {
-        availableMinFrameDurations.insert(availableMinFrameDurations.end(),
-                availableMinFrameDurationsBurst.begin(),
-                availableMinFrameDurationsBurst.end());
-    }
+  // Needs to be last, to collect all the keys set
 
-    if (availableMinFrameDurations.size() > 0) {
-        ADD_STATIC_ENTRY(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
-                &availableMinFrameDurations[0],
-                availableMinFrameDurations.size());
-    }
+  availableCharacteristicsKeys.add(
+      ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS);
+  info.update(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS,
+              availableCharacteristicsKeys);
 
-    const std::vector<int64_t> availableStallDurationsBasic = {
-        HAL_PIXEL_FORMAT_BLOB, width, height, Sensor::kFrameDurationRange[0],
-        HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 320, 240, 0,
-        HAL_PIXEL_FORMAT_YCbCr_420_888, 320, 240, 0,
-        HAL_PIXEL_FORMAT_RGBA_8888, 320, 240, 0,
-    };
-
-    // Always need to include 640x480 in basic formats
-    const std::vector<int64_t> availableStallDurationsBasic640 = {
-        HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 640, 480, 0,
-        HAL_PIXEL_FORMAT_YCbCr_420_888, 640, 480, 0,
-        HAL_PIXEL_FORMAT_BLOB, 640, 480, Sensor::kFrameDurationRange[0]
-    };
-
-    const std::vector<int64_t> availableStallDurationsRaw = {
-        HAL_PIXEL_FORMAT_RAW16, width, height, Sensor::kFrameDurationRange[0]
-    };
-    const std::vector<int64_t> availableStallDurationsBurst = {
-        HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, width, height, 0,
-        HAL_PIXEL_FORMAT_YCbCr_420_888, width, height, 0,
-        HAL_PIXEL_FORMAT_RGBA_8888, width, height, 0
-    };
-
-    std::vector<int64_t> availableStallDurations;
-
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        availableStallDurations.insert(availableStallDurations.end(),
-                availableStallDurationsBasic.begin(),
-                availableStallDurationsBasic.end());
-        if (width > 640) {
-            availableStallDurations.insert(availableStallDurations.end(),
-                    availableStallDurationsBasic640.begin(),
-                    availableStallDurationsBasic640.end());
-        }
-    }
-    if (hasCapability(RAW)) {
-        availableStallDurations.insert(availableStallDurations.end(),
-                availableStallDurationsRaw.begin(),
-                availableStallDurationsRaw.end());
-    }
-    if (hasCapability(BURST_CAPTURE)) {
-        availableStallDurations.insert(availableStallDurations.end(),
-                availableStallDurationsBurst.begin(),
-                availableStallDurationsBurst.end());
-    }
-
-    if (availableStallDurations.size() > 0) {
-        ADD_STATIC_ENTRY(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
-                &availableStallDurations[0],
-                availableStallDurations.size());
-    }
-
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        static const uint8_t croppingType = ANDROID_SCALER_CROPPING_TYPE_FREEFORM;
-        ADD_STATIC_ENTRY(ANDROID_SCALER_CROPPING_TYPE,
-                &croppingType, 1);
-
-        static const float maxZoom = 10;
-        ADD_STATIC_ENTRY(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
-                &maxZoom, 1);
-    }
-
-    // android.jpeg
-
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        static const int32_t jpegThumbnailSizes[] = {
-            0, 0,
-            160, 120,
-            320, 240
-        };
-        ADD_STATIC_ENTRY(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
-                jpegThumbnailSizes, sizeof(jpegThumbnailSizes)/sizeof(int32_t));
-
-        static const int32_t jpegMaxSize = JpegCompressor::kMaxJpegSize;
-        ADD_STATIC_ENTRY(ANDROID_JPEG_MAX_SIZE, &jpegMaxSize, 1);
-    }
-
-    // android.stats
-
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        static const uint8_t availableFaceDetectModes[] = {
-            ANDROID_STATISTICS_FACE_DETECT_MODE_OFF,
-            ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE,
-            ANDROID_STATISTICS_FACE_DETECT_MODE_FULL
-        };
-        ADD_STATIC_ENTRY(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
-                availableFaceDetectModes,
-                sizeof(availableFaceDetectModes));
-
-        static const int32_t maxFaceCount = 8;
-        ADD_STATIC_ENTRY(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT,
-                &maxFaceCount, 1);
-
-
-        static const uint8_t availableShadingMapModes[] = {
-            ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF
-        };
-        ADD_STATIC_ENTRY(ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES,
-                availableShadingMapModes, sizeof(availableShadingMapModes));
-    }
-
-    // android.sync
-
-    static const int32_t maxLatency =
-            hasCapability(FULL_LEVEL) ? ANDROID_SYNC_MAX_LATENCY_PER_FRAME_CONTROL : 3;
-    ADD_STATIC_ENTRY(ANDROID_SYNC_MAX_LATENCY, &maxLatency, 1);
-
-    // android.control
-
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        static const uint8_t availableControlModes[] = {
-            ANDROID_CONTROL_MODE_OFF, ANDROID_CONTROL_MODE_AUTO, ANDROID_CONTROL_MODE_USE_SCENE_MODE
-        };
-        ADD_STATIC_ENTRY(ANDROID_CONTROL_AVAILABLE_MODES,
-                availableControlModes, sizeof(availableControlModes));
-    } else {
-        static const uint8_t availableControlModes[] = {
-            ANDROID_CONTROL_MODE_AUTO
-        };
-        ADD_STATIC_ENTRY(ANDROID_CONTROL_AVAILABLE_MODES,
-                availableControlModes, sizeof(availableControlModes));
-    }
-
-    static const uint8_t availableSceneModes[] = {
-        hasCapability(BACKWARD_COMPATIBLE) ?
-            ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY :
-            ANDROID_CONTROL_SCENE_MODE_DISABLED
-    };
-    ADD_STATIC_ENTRY(ANDROID_CONTROL_AVAILABLE_SCENE_MODES,
-            availableSceneModes, sizeof(availableSceneModes));
-
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        static const uint8_t availableEffects[] = {
-            ANDROID_CONTROL_EFFECT_MODE_OFF
-        };
-        ADD_STATIC_ENTRY(ANDROID_CONTROL_AVAILABLE_EFFECTS,
-                availableEffects, sizeof(availableEffects));
-    }
-
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        static const int32_t max3aRegions[] = {/*AE*/ 1,/*AWB*/ 0,/*AF*/ 1};
-        ADD_STATIC_ENTRY(ANDROID_CONTROL_MAX_REGIONS,
-                max3aRegions, sizeof(max3aRegions)/sizeof(max3aRegions[0]));
-
-        static const uint8_t availableAeModes[] = {
-            ANDROID_CONTROL_AE_MODE_OFF,
-            ANDROID_CONTROL_AE_MODE_ON
-        };
-        ADD_STATIC_ENTRY(ANDROID_CONTROL_AE_AVAILABLE_MODES,
-                availableAeModes, sizeof(availableAeModes));
-
-        static const camera_metadata_rational exposureCompensationStep = {
-            1, 3
-        };
-        ADD_STATIC_ENTRY(ANDROID_CONTROL_AE_COMPENSATION_STEP,
-                &exposureCompensationStep, 1);
-
-        int32_t exposureCompensationRange[] = {-9, 9};
-        ADD_STATIC_ENTRY(ANDROID_CONTROL_AE_COMPENSATION_RANGE,
-                exposureCompensationRange,
-                sizeof(exposureCompensationRange)/sizeof(int32_t));
-    }
-
-    static const int32_t availableTargetFpsRanges[] = {
-            5, 30, 15, 30, 15, 15, 30, 30
-    };
-    ADD_STATIC_ENTRY(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
-            availableTargetFpsRanges,
-            sizeof(availableTargetFpsRanges)/sizeof(int32_t));
-
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        static const uint8_t availableAntibandingModes[] = {
-            ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF,
-            ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO
-        };
-        ADD_STATIC_ENTRY(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
-                availableAntibandingModes, sizeof(availableAntibandingModes));
-    }
-
-    static const uint8_t aeLockAvailable = hasCapability(BACKWARD_COMPATIBLE) ?
-            ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE : ANDROID_CONTROL_AE_LOCK_AVAILABLE_FALSE;
-
-    ADD_STATIC_ENTRY(ANDROID_CONTROL_AE_LOCK_AVAILABLE,
-            &aeLockAvailable, 1);
-
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        static const uint8_t availableAwbModes[] = {
-            ANDROID_CONTROL_AWB_MODE_OFF,
-            ANDROID_CONTROL_AWB_MODE_AUTO,
-            ANDROID_CONTROL_AWB_MODE_INCANDESCENT,
-            ANDROID_CONTROL_AWB_MODE_FLUORESCENT,
-            ANDROID_CONTROL_AWB_MODE_DAYLIGHT,
-            ANDROID_CONTROL_AWB_MODE_SHADE
-        };
-        ADD_STATIC_ENTRY(ANDROID_CONTROL_AWB_AVAILABLE_MODES,
-                availableAwbModes, sizeof(availableAwbModes));
-    }
-
-    static const uint8_t awbLockAvailable = hasCapability(BACKWARD_COMPATIBLE) ?
-            ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE : ANDROID_CONTROL_AWB_LOCK_AVAILABLE_FALSE;
-
-    ADD_STATIC_ENTRY(ANDROID_CONTROL_AWB_LOCK_AVAILABLE,
-            &awbLockAvailable, 1);
-
-    static const uint8_t availableAfModesBack[] = {
-            ANDROID_CONTROL_AF_MODE_OFF,
-            ANDROID_CONTROL_AF_MODE_AUTO,
-            ANDROID_CONTROL_AF_MODE_MACRO,
-            ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO,
-            ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE
-    };
-
-    static const uint8_t availableAfModesFront[] = {
-            ANDROID_CONTROL_AF_MODE_OFF
-    };
-
-    if (mFacingBack && hasCapability(BACKWARD_COMPATIBLE)) {
-        ADD_STATIC_ENTRY(ANDROID_CONTROL_AF_AVAILABLE_MODES,
-                availableAfModesBack, sizeof(availableAfModesBack));
-    } else {
-        ADD_STATIC_ENTRY(ANDROID_CONTROL_AF_AVAILABLE_MODES,
-                availableAfModesFront, sizeof(availableAfModesFront));
-    }
-
-    static const uint8_t availableVstabModes[] = {
-        ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF
-    };
-    ADD_STATIC_ENTRY(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
-            availableVstabModes, sizeof(availableVstabModes));
-
-    // android.colorCorrection
-
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        static const uint8_t availableAberrationModes[] = {
-            ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF,
-            ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST,
-            ANDROID_COLOR_CORRECTION_ABERRATION_MODE_HIGH_QUALITY
-        };
-        ADD_STATIC_ENTRY(ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES,
-                availableAberrationModes, sizeof(availableAberrationModes));
-    } else {
-        static const uint8_t availableAberrationModes[] = {
-            ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF,
-        };
-        ADD_STATIC_ENTRY(ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES,
-                availableAberrationModes, sizeof(availableAberrationModes));
-    }
-    // android.edge
-
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        static const uint8_t availableEdgeModes[] = {
-            ANDROID_EDGE_MODE_OFF, ANDROID_EDGE_MODE_FAST, ANDROID_EDGE_MODE_HIGH_QUALITY
-        };
-        ADD_STATIC_ENTRY(ANDROID_EDGE_AVAILABLE_EDGE_MODES,
-                availableEdgeModes, sizeof(availableEdgeModes));
-    } else {
-        static const uint8_t availableEdgeModes[] = {
-            ANDROID_EDGE_MODE_OFF
-        };
-        ADD_STATIC_ENTRY(ANDROID_EDGE_AVAILABLE_EDGE_MODES,
-                availableEdgeModes, sizeof(availableEdgeModes));
-    }
-
-    // android.info
-
-    static const uint8_t supportedHardwareLevel =
-            hasCapability(FULL_LEVEL) ? ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_FULL :
-                    ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED;
-    ADD_STATIC_ENTRY(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL,
-                &supportedHardwareLevel,
-                /*count*/1);
-
-    // android.noiseReduction
-
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        static const uint8_t availableNoiseReductionModes[] = {
-            ANDROID_NOISE_REDUCTION_MODE_OFF,
-            ANDROID_NOISE_REDUCTION_MODE_FAST,
-            ANDROID_NOISE_REDUCTION_MODE_HIGH_QUALITY
-        };
-        ADD_STATIC_ENTRY(ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES,
-                availableNoiseReductionModes, sizeof(availableNoiseReductionModes));
-    } else {
-        static const uint8_t availableNoiseReductionModes[] = {
-            ANDROID_NOISE_REDUCTION_MODE_OFF,
-        };
-        ADD_STATIC_ENTRY(ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES,
-                availableNoiseReductionModes, sizeof(availableNoiseReductionModes));
-    }
-
-    // android.depth
-
-    if (hasCapability(DEPTH_OUTPUT)) {
-
-        static const int32_t maxDepthSamples = 100;
-        ADD_STATIC_ENTRY(ANDROID_DEPTH_MAX_DEPTH_SAMPLES,
-                &maxDepthSamples, 1);
-
-        static const int32_t availableDepthStreamConfigurations[] = {
-            HAL_PIXEL_FORMAT_Y16, 160, 120, ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS_OUTPUT,
-            HAL_PIXEL_FORMAT_BLOB, maxDepthSamples,1, ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS_OUTPUT
-        };
-        ADD_STATIC_ENTRY(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS,
-                availableDepthStreamConfigurations,
-                sizeof(availableDepthStreamConfigurations)/sizeof(int32_t));
-
-        static const int64_t availableDepthMinFrameDurations[] = {
-            HAL_PIXEL_FORMAT_Y16, 160, 120, Sensor::kFrameDurationRange[0],
-            HAL_PIXEL_FORMAT_BLOB, maxDepthSamples,1, Sensor::kFrameDurationRange[0]
-        };
-        ADD_STATIC_ENTRY(ANDROID_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS,
-                availableDepthMinFrameDurations,
-                sizeof(availableDepthMinFrameDurations)/sizeof(int64_t));
-
-        static const int64_t availableDepthStallDurations[] = {
-            HAL_PIXEL_FORMAT_Y16, 160, 120, Sensor::kFrameDurationRange[0],
-            HAL_PIXEL_FORMAT_BLOB, maxDepthSamples,1, Sensor::kFrameDurationRange[0]
-        };
-        ADD_STATIC_ENTRY(ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS,
-                availableDepthStallDurations,
-                sizeof(availableDepthStallDurations)/sizeof(int64_t));
-
-        uint8_t depthIsExclusive = ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE_FALSE;
-        ADD_STATIC_ENTRY(ANDROID_DEPTH_DEPTH_IS_EXCLUSIVE,
-                &depthIsExclusive, 1);
-    }
-
-    // android.shading
-
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        static const uint8_t availableShadingModes[] = {
-            ANDROID_SHADING_MODE_OFF, ANDROID_SHADING_MODE_FAST, ANDROID_SHADING_MODE_HIGH_QUALITY
-        };
-        ADD_STATIC_ENTRY(ANDROID_SHADING_AVAILABLE_MODES, availableShadingModes,
-                sizeof(availableShadingModes));
-    } else {
-        static const uint8_t availableShadingModes[] = {
-            ANDROID_SHADING_MODE_OFF
-        };
-        ADD_STATIC_ENTRY(ANDROID_SHADING_AVAILABLE_MODES, availableShadingModes,
-                sizeof(availableShadingModes));
-    }
-
-    // android.request
-
-    static const int32_t maxNumOutputStreams[] = {
-            kMaxRawStreamCount, kMaxProcessedStreamCount, kMaxJpegStreamCount
-    };
-    ADD_STATIC_ENTRY(ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS, maxNumOutputStreams, 3);
-
-    static const uint8_t maxPipelineDepth = kMaxBufferCount;
-    ADD_STATIC_ENTRY(ANDROID_REQUEST_PIPELINE_MAX_DEPTH, &maxPipelineDepth, 1);
-
-    static const int32_t partialResultCount = 1;
-    ADD_STATIC_ENTRY(ANDROID_REQUEST_PARTIAL_RESULT_COUNT,
-            &partialResultCount, /*count*/1);
-
-    SortedVector<uint8_t> caps;
-    for (size_t i = 0; i < mCapabilities.size(); i++) {
-        switch(mCapabilities[i]) {
-            case BACKWARD_COMPATIBLE:
-                caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE);
-                break;
-            case MANUAL_SENSOR:
-                caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR);
-                break;
-            case MANUAL_POST_PROCESSING:
-                caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_POST_PROCESSING);
-                break;
-            case RAW:
-                caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_RAW);
-                break;
-            case PRIVATE_REPROCESSING:
-                caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_PRIVATE_REPROCESSING);
-                break;
-            case READ_SENSOR_SETTINGS:
-                caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_READ_SENSOR_SETTINGS);
-                break;
-            case BURST_CAPTURE:
-                caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE);
-                break;
-            case YUV_REPROCESSING:
-                caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_YUV_REPROCESSING);
-                break;
-            case DEPTH_OUTPUT:
-                caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_DEPTH_OUTPUT);
-                break;
-            case CONSTRAINED_HIGH_SPEED_VIDEO:
-                caps.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO);
-                break;
-            default:
-                // Ignore LEVELs
-                break;
-        }
-    }
-    ADD_STATIC_ENTRY(ANDROID_REQUEST_AVAILABLE_CAPABILITIES, caps.array(), caps.size());
-
-    // Scan a default request template for included request keys
-    Vector<int32_t> availableRequestKeys;
-    const camera_metadata_t *previewRequest =
-        constructDefaultRequestSettings(CAMERA3_TEMPLATE_PREVIEW);
-    for (size_t i = 0; i < get_camera_metadata_entry_count(previewRequest); i++) {
-        camera_metadata_ro_entry_t entry;
-        get_camera_metadata_ro_entry(previewRequest, i, &entry);
-        availableRequestKeys.add(entry.tag);
-    }
-    ADD_STATIC_ENTRY(ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS, availableRequestKeys.array(),
-            availableRequestKeys.size());
-
-    // Add a few more result keys. Must be kept up to date with the various places that add these
-
-    Vector<int32_t> availableResultKeys(availableRequestKeys);
-    if (hasCapability(BACKWARD_COMPATIBLE)) {
-        availableResultKeys.add(ANDROID_CONTROL_AE_STATE);
-        availableResultKeys.add(ANDROID_CONTROL_AF_STATE);
-        availableResultKeys.add(ANDROID_CONTROL_AWB_STATE);
-        availableResultKeys.add(ANDROID_FLASH_STATE);
-        availableResultKeys.add(ANDROID_LENS_STATE);
-        availableResultKeys.add(ANDROID_LENS_FOCUS_RANGE);
-        availableResultKeys.add(ANDROID_SENSOR_ROLLING_SHUTTER_SKEW);
-        availableResultKeys.add(ANDROID_STATISTICS_SCENE_FLICKER);
-    }
-
-    if (hasCapability(DEPTH_OUTPUT)) {
-        availableResultKeys.add(ANDROID_LENS_POSE_ROTATION);
-        availableResultKeys.add(ANDROID_LENS_POSE_TRANSLATION);
-        availableResultKeys.add(ANDROID_LENS_INTRINSIC_CALIBRATION);
-        availableResultKeys.add(ANDROID_LENS_RADIAL_DISTORTION);
-    }
-
-    availableResultKeys.add(ANDROID_REQUEST_PIPELINE_DEPTH);
-    availableResultKeys.add(ANDROID_SENSOR_TIMESTAMP);
-
-    ADD_STATIC_ENTRY(ANDROID_REQUEST_AVAILABLE_RESULT_KEYS, availableResultKeys.array(),
-            availableResultKeys.size());
-
-    // Needs to be last, to collect all the keys set
-
-    availableCharacteristicsKeys.add(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS);
-    info.update(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS,
-            availableCharacteristicsKeys);
-
-    mCameraInfo = info.release();
+  mCameraInfo = info.release();
 
 #undef ADD_STATIC_ENTRY
-    return OK;
+  return OK;
 }
 
 status_t EmulatedFakeCamera3::process3A(CameraMetadata &settings) {
-    /**
-     * Extract top-level 3A controls
-     */
-    status_t res;
+  /**
+   * Extract top-level 3A controls
+   */
+  status_t res;
 
-    bool facePriority = false;
+  bool facePriority = false;
 
-    camera_metadata_entry e;
+  camera_metadata_entry e;
 
-    e = settings.find(ANDROID_CONTROL_MODE);
-    if (e.count == 0) {
-        ALOGE("%s: No control mode entry!", __FUNCTION__);
-        return BAD_VALUE;
-    }
-    uint8_t controlMode = e.data.u8[0];
+  e = settings.find(ANDROID_CONTROL_MODE);
+  if (e.count == 0) {
+    ALOGE("%s: No control mode entry!", __FUNCTION__);
+    return BAD_VALUE;
+  }
+  uint8_t controlMode = e.data.u8[0];
 
-    if (controlMode == ANDROID_CONTROL_MODE_OFF) {
-        mAeMode   = ANDROID_CONTROL_AE_MODE_OFF;
-        mAfMode   = ANDROID_CONTROL_AF_MODE_OFF;
-        mAwbMode  = ANDROID_CONTROL_AWB_MODE_OFF;
-        mAeState  = ANDROID_CONTROL_AE_STATE_INACTIVE;
-        mAfState  = ANDROID_CONTROL_AF_STATE_INACTIVE;
-        mAwbState = ANDROID_CONTROL_AWB_STATE_INACTIVE;
-        update3A(settings);
-        return OK;
-    } else if (controlMode == ANDROID_CONTROL_MODE_USE_SCENE_MODE) {
-        if (!hasCapability(BACKWARD_COMPATIBLE)) {
-            ALOGE("%s: Can't use scene mode when BACKWARD_COMPATIBLE not supported!",
-                  __FUNCTION__);
-            return BAD_VALUE;
-        }
-
-        e = settings.find(ANDROID_CONTROL_SCENE_MODE);
-        if (e.count == 0) {
-            ALOGE("%s: No scene mode entry!", __FUNCTION__);
-            return BAD_VALUE;
-        }
-        uint8_t sceneMode = e.data.u8[0];
-
-        switch(sceneMode) {
-            case ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY:
-                mFacePriority = true;
-                break;
-            default:
-                ALOGE("%s: Emulator doesn't support scene mode %d",
-                        __FUNCTION__, sceneMode);
-                return BAD_VALUE;
-        }
-    } else {
-        mFacePriority = false;
-    }
-
-    // controlMode == AUTO or sceneMode = FACE_PRIORITY
-    // Process individual 3A controls
-
-    res = doFakeAE(settings);
-    if (res != OK) return res;
-
-    res = doFakeAF(settings);
-    if (res != OK) return res;
-
-    res = doFakeAWB(settings);
-    if (res != OK) return res;
-
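+  // MODE_OFF disables all of 3A: force each algorithm off and its state to
+  // INACTIVE before returning.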
+  if (controlMode == ANDROID_CONTROL_MODE_OFF) {
+    mAeMode = ANDROID_CONTROL_AE_MODE_OFF;
+    mAfMode = ANDROID_CONTROL_AF_MODE_OFF;
+    mAwbMode = ANDROID_CONTROL_AWB_MODE_OFF;
+    mAeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
+    mAfState = ANDROID_CONTROL_AF_STATE_INACTIVE;
+    mAwbState = ANDROID_CONTROL_AWB_STATE_INACTIVE;
     update3A(settings);
     return OK;
+  } else if (controlMode == ANDROID_CONTROL_MODE_USE_SCENE_MODE) {
+    if (!hasCapability(BACKWARD_COMPATIBLE)) {
+      ALOGE("%s: Can't use scene mode when BACKWARD_COMPATIBLE not supported!",
+            __FUNCTION__);
+      return BAD_VALUE;
+    }
+
+    e = settings.find(ANDROID_CONTROL_SCENE_MODE);
+    if (e.count == 0) {
+      ALOGE("%s: No scene mode entry!", __FUNCTION__);
+      return BAD_VALUE;
+    }
+    uint8_t sceneMode = e.data.u8[0];
+
+    switch (sceneMode) {
+      case ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY:
+        mFacePriority = true;
+        break;
+      default:
+        ALOGE("%s: Emulator doesn't support scene mode %d", __FUNCTION__,
+              sceneMode);
+        return BAD_VALUE;
+    }
+  } else {
+    mFacePriority = false;
+  }
+
+  // controlMode == AUTO or sceneMode == FACE_PRIORITY
+  // Process individual 3A controls
+
+  res = doFakeAE(settings);
+  if (res != OK) return res;
+
+  res = doFakeAF(settings);
+  if (res != OK) return res;
+
+  res = doFakeAWB(settings);
+  if (res != OK) return res;
+
+  update3A(settings);
+  return OK;
 }
 
 status_t EmulatedFakeCamera3::doFakeAE(CameraMetadata &settings) {
-    camera_metadata_entry e;
+  camera_metadata_entry e;
 
-    e = settings.find(ANDROID_CONTROL_AE_MODE);
-    if (e.count == 0 && hasCapability(BACKWARD_COMPATIBLE)) {
-        ALOGE("%s: No AE mode entry!", __FUNCTION__);
-        return BAD_VALUE;
-    }
-    uint8_t aeMode = (e.count > 0) ? e.data.u8[0] : (uint8_t)ANDROID_CONTROL_AE_MODE_ON;
-    mAeMode = aeMode;
+  e = settings.find(ANDROID_CONTROL_AE_MODE);
+  if (e.count == 0 && hasCapability(BACKWARD_COMPATIBLE)) {
+    ALOGE("%s: No AE mode entry!", __FUNCTION__);
+    return BAD_VALUE;
+  }
+  uint8_t aeMode =
+      (e.count > 0) ? e.data.u8[0] : (uint8_t)ANDROID_CONTROL_AE_MODE_ON;
+  mAeMode = aeMode;
 
-    switch (aeMode) {
-        case ANDROID_CONTROL_AE_MODE_OFF:
-            // AE is OFF
-            mAeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
-            return OK;
-        case ANDROID_CONTROL_AE_MODE_ON:
-            // OK for AUTO modes
-            break;
-        default:
-            // Mostly silently ignore unsupported modes
-            ALOGV("%s: Emulator doesn't support AE mode %d, assuming ON",
-                    __FUNCTION__, aeMode);
-            break;
+  switch (aeMode) {
+    case ANDROID_CONTROL_AE_MODE_OFF:
+      // AE is OFF
+      mAeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
+      return OK;
+    case ANDROID_CONTROL_AE_MODE_ON:
+      // OK for AUTO modes
+      break;
+    default:
+      // Mostly silently ignore unsupported modes
+      ALOGV("%s: Emulator doesn't support AE mode %d, assuming ON",
+            __FUNCTION__, aeMode);
+      break;
+  }
+
+  e = settings.find(ANDROID_CONTROL_AE_LOCK);
+  bool aeLocked =
+      (e.count > 0) ? (e.data.u8[0] == ANDROID_CONTROL_AE_LOCK_ON) : false;
+
+  e = settings.find(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER);
+  bool precaptureTrigger = false;
+  if (e.count != 0) {
+    precaptureTrigger =
+        (e.data.u8[0] == ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_START);
+  }
+
+  if (precaptureTrigger) {
+    ALOGV("%s: Pre capture trigger = %d", __FUNCTION__, precaptureTrigger);
+  } else if (e.count > 0) {
+    ALOGV("%s: Pre capture trigger was present? %zu", __FUNCTION__, e.count);
+  }
+
+  if (precaptureTrigger || mAeState == ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
+    // Run precapture sequence
+    if (mAeState != ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
+      mAeCounter = 0;
     }
 
-    e = settings.find(ANDROID_CONTROL_AE_LOCK);
-    bool aeLocked = (e.count > 0) ? (e.data.u8[0] == ANDROID_CONTROL_AE_LOCK_ON) : false;
-
-    e = settings.find(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER);
-    bool precaptureTrigger = false;
-    if (e.count != 0) {
-        precaptureTrigger =
-                (e.data.u8[0] == ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_START);
-    }
-
-    if (precaptureTrigger) {
-        ALOGV("%s: Pre capture trigger = %d", __FUNCTION__, precaptureTrigger);
-    } else if (e.count > 0) {
-        ALOGV("%s: Pre capture trigger was present? %zu",
-              __FUNCTION__,
-              e.count);
-    }
-
-    if (precaptureTrigger || mAeState == ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
-        // Run precapture sequence
-        if (mAeState != ANDROID_CONTROL_AE_STATE_PRECAPTURE) {
-            mAeCounter = 0;
-        }
-
-        if (mFacePriority) {
-            mAeTargetExposureTime = kFacePriorityExposureTime;
-        } else {
-            mAeTargetExposureTime = kNormalExposureTime;
-        }
-
-        if (mAeCounter > kPrecaptureMinFrames &&
-                (mAeTargetExposureTime - mAeCurrentExposureTime) <
-                mAeTargetExposureTime / 10) {
-            // Done with precapture
-            mAeCounter = 0;
-            mAeState = aeLocked ? ANDROID_CONTROL_AE_STATE_LOCKED :
-                    ANDROID_CONTROL_AE_STATE_CONVERGED;
-        } else {
-            // Converge some more
-            mAeCurrentExposureTime +=
-                    (mAeTargetExposureTime - mAeCurrentExposureTime) *
-                    kExposureTrackRate;
-            mAeCounter++;
-            mAeState = ANDROID_CONTROL_AE_STATE_PRECAPTURE;
-        }
-
-    } else if (!aeLocked) {
-        // Run standard occasional AE scan
-        switch (mAeState) {
-            case ANDROID_CONTROL_AE_STATE_CONVERGED:
-            case ANDROID_CONTROL_AE_STATE_INACTIVE:
-                mAeCounter++;
-                if (mAeCounter > kStableAeMaxFrames) {
-                    mAeTargetExposureTime =
-                            mFacePriority ? kFacePriorityExposureTime :
-                            kNormalExposureTime;
-                    float exposureStep = ((double)rand() / RAND_MAX) *
-                            (kExposureWanderMax - kExposureWanderMin) +
-                            kExposureWanderMin;
-                    mAeTargetExposureTime *= std::pow(2, exposureStep);
-                    mAeState = ANDROID_CONTROL_AE_STATE_SEARCHING;
-                }
-                break;
-            case ANDROID_CONTROL_AE_STATE_SEARCHING:
-                mAeCurrentExposureTime +=
-                        (mAeTargetExposureTime - mAeCurrentExposureTime) *
-                        kExposureTrackRate;
-                if (abs(mAeTargetExposureTime - mAeCurrentExposureTime) <
-                        mAeTargetExposureTime / 10) {
-                    // Close enough
-                    mAeState = ANDROID_CONTROL_AE_STATE_CONVERGED;
-                    mAeCounter = 0;
-                }
-                break;
-            case ANDROID_CONTROL_AE_STATE_LOCKED:
-                mAeState = ANDROID_CONTROL_AE_STATE_CONVERGED;
-                mAeCounter = 0;
-                break;
-            default:
-                ALOGE("%s: Emulator in unexpected AE state %d",
-                        __FUNCTION__, mAeState);
-                return INVALID_OPERATION;
-        }
+    if (mFacePriority) {
+      mAeTargetExposureTime = kFacePriorityExposureTime;
     } else {
-        // AE is locked
-        mAeState = ANDROID_CONTROL_AE_STATE_LOCKED;
+      mAeTargetExposureTime = kNormalExposureTime;
     }
 
-    return OK;
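+    // Precapture is done once a minimum number of frames has elapsed and the
+    // simulated exposure is within 10% of its target.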
+    if (mAeCounter > kPrecaptureMinFrames &&
+        (mAeTargetExposureTime - mAeCurrentExposureTime) <
+            mAeTargetExposureTime / 10) {
+      // Done with precapture
+      mAeCounter = 0;
+      mAeState = aeLocked ? ANDROID_CONTROL_AE_STATE_LOCKED
+                          : ANDROID_CONTROL_AE_STATE_CONVERGED;
+    } else {
+      // Converge some more
+      mAeCurrentExposureTime +=
+          (mAeTargetExposureTime - mAeCurrentExposureTime) * kExposureTrackRate;
+      mAeCounter++;
+      mAeState = ANDROID_CONTROL_AE_STATE_PRECAPTURE;
+    }
+
+  } else if (!aeLocked) {
+    // Run standard occasional AE scan
+    switch (mAeState) {
+      case ANDROID_CONTROL_AE_STATE_CONVERGED:
+      case ANDROID_CONTROL_AE_STATE_INACTIVE:
+        mAeCounter++;
+        if (mAeCounter > kStableAeMaxFrames) {
+          mAeTargetExposureTime =
+              mFacePriority ? kFacePriorityExposureTime : kNormalExposureTime;
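+          // Wander the target exposure by a random number of stops in
+          // [kExposureWanderMin, kExposureWanderMax], then re-enter SEARCHING.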
+          float exposureStep = ((double)rand() / RAND_MAX) *
+                                   (kExposureWanderMax - kExposureWanderMin) +
+                               kExposureWanderMin;
+          mAeTargetExposureTime *= std::pow(2, exposureStep);
+          mAeState = ANDROID_CONTROL_AE_STATE_SEARCHING;
+        }
+        break;
+      case ANDROID_CONTROL_AE_STATE_SEARCHING:
+        mAeCurrentExposureTime +=
+            (mAeTargetExposureTime - mAeCurrentExposureTime) *
+            kExposureTrackRate;
+        if (abs(mAeTargetExposureTime - mAeCurrentExposureTime) <
+            mAeTargetExposureTime / 10) {
+          // Close enough
+          mAeState = ANDROID_CONTROL_AE_STATE_CONVERGED;
+          mAeCounter = 0;
+        }
+        break;
+      case ANDROID_CONTROL_AE_STATE_LOCKED:
+        mAeState = ANDROID_CONTROL_AE_STATE_CONVERGED;
+        mAeCounter = 0;
+        break;
+      default:
+        ALOGE("%s: Emulator in unexpected AE state %d", __FUNCTION__, mAeState);
+        return INVALID_OPERATION;
+    }
+  } else {
+    // AE is locked
+    mAeState = ANDROID_CONTROL_AE_STATE_LOCKED;
+  }
+
+  return OK;
 }
 
 status_t EmulatedFakeCamera3::doFakeAF(CameraMetadata &settings) {
-    camera_metadata_entry e;
+  camera_metadata_entry e;
 
-    e = settings.find(ANDROID_CONTROL_AF_MODE);
-    if (e.count == 0 && hasCapability(BACKWARD_COMPATIBLE)) {
-        ALOGE("%s: No AF mode entry!", __FUNCTION__);
+  e = settings.find(ANDROID_CONTROL_AF_MODE);
+  if (e.count == 0 && hasCapability(BACKWARD_COMPATIBLE)) {
+    ALOGE("%s: No AF mode entry!", __FUNCTION__);
+    return BAD_VALUE;
+  }
+  uint8_t afMode =
+      (e.count > 0) ? e.data.u8[0] : (uint8_t)ANDROID_CONTROL_AF_MODE_OFF;
+
+  e = settings.find(ANDROID_CONTROL_AF_TRIGGER);
+  typedef camera_metadata_enum_android_control_af_trigger af_trigger_t;
+  af_trigger_t afTrigger;
+  if (e.count != 0) {
+    afTrigger = static_cast<af_trigger_t>(e.data.u8[0]);
+
+    ALOGV("%s: AF trigger set to 0x%x", __FUNCTION__, afTrigger);
+    ALOGV("%s: AF mode is 0x%x", __FUNCTION__, afMode);
+  } else {
+    afTrigger = ANDROID_CONTROL_AF_TRIGGER_IDLE;
+  }
+
+  switch (afMode) {
+    case ANDROID_CONTROL_AF_MODE_OFF:
+      mAfState = ANDROID_CONTROL_AF_STATE_INACTIVE;
+      return OK;
+    case ANDROID_CONTROL_AF_MODE_AUTO:
+    case ANDROID_CONTROL_AF_MODE_MACRO:
+    case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+    case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+      if (!mFacingBack) {
+        ALOGE("%s: Front camera doesn't support AF mode %d", __FUNCTION__,
+              afMode);
         return BAD_VALUE;
-    }
-    uint8_t afMode = (e.count > 0) ? e.data.u8[0] : (uint8_t)ANDROID_CONTROL_AF_MODE_OFF;
+      }
+      // OK, handle transitions further below
+      break;
+    default:
+      ALOGE("%s: Emulator doesn't support AF mode %d", __FUNCTION__, afMode);
+      return BAD_VALUE;
+  }
 
-    e = settings.find(ANDROID_CONTROL_AF_TRIGGER);
-    typedef camera_metadata_enum_android_control_af_trigger af_trigger_t;
-    af_trigger_t afTrigger;
-    if (e.count != 0) {
-        afTrigger = static_cast<af_trigger_t>(e.data.u8[0]);
+  bool afModeChanged = mAfMode != afMode;
+  mAfMode = afMode;
 
-        ALOGV("%s: AF trigger set to 0x%x", __FUNCTION__, afTrigger);
-        ALOGV("%s: AF mode is 0x%x", __FUNCTION__, afMode);
-    } else {
-        afTrigger = ANDROID_CONTROL_AF_TRIGGER_IDLE;
-    }
+  /**
+   * Simulate AF triggers. Transition at most 1 state per frame.
+   * - Focusing always succeeds (goes into locked, or PASSIVE_SCAN).
+   */
 
-    switch (afMode) {
-        case ANDROID_CONTROL_AF_MODE_OFF:
-            mAfState = ANDROID_CONTROL_AF_STATE_INACTIVE;
-            return OK;
-        case ANDROID_CONTROL_AF_MODE_AUTO:
-        case ANDROID_CONTROL_AF_MODE_MACRO:
-        case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
-        case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
-            if (!mFacingBack) {
-                ALOGE("%s: Front camera doesn't support AF mode %d",
-                        __FUNCTION__, afMode);
-                return BAD_VALUE;
-            }
-            // OK, handle transitions lower on
+  bool afTriggerStart = false;
+  bool afTriggerCancel = false;
+  switch (afTrigger) {
+    case ANDROID_CONTROL_AF_TRIGGER_IDLE:
+      break;
+    case ANDROID_CONTROL_AF_TRIGGER_START:
+      afTriggerStart = true;
+      break;
+    case ANDROID_CONTROL_AF_TRIGGER_CANCEL:
+      afTriggerCancel = true;
+      // Cancel trigger always transitions into INACTIVE
+      mAfState = ANDROID_CONTROL_AF_STATE_INACTIVE;
+
+      ALOGV("%s: AF State transition to STATE_INACTIVE", __FUNCTION__);
+
+      // Stay in 'inactive' until at least next frame
+      return OK;
+    default:
+      ALOGE("%s: Unknown af trigger value %d", __FUNCTION__, afTrigger);
+      return BAD_VALUE;
+  }
+
+  // If we get down here, we're either in an autofocus mode
+  //  or in a continuous focus mode (and no other modes)
+
+  int oldAfState = mAfState;
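+  // Step the AF state machine, driven by the current AF mode and whether a
+  // START trigger arrived in this request (CANCEL already returned above).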
+  switch (mAfState) {
+    case ANDROID_CONTROL_AF_STATE_INACTIVE:
+      if (afTriggerStart) {
+        switch (afMode) {
+          case ANDROID_CONTROL_AF_MODE_AUTO:
+            // fall-through
+          case ANDROID_CONTROL_AF_MODE_MACRO:
+            mAfState = ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN;
             break;
-        default:
-            ALOGE("%s: Emulator doesn't support AF mode %d",
-                    __FUNCTION__, afMode);
-            return BAD_VALUE;
-    }
-
-    bool afModeChanged = mAfMode != afMode;
-    mAfMode = afMode;
-
-    /**
-     * Simulate AF triggers. Transition at most 1 state per frame.
-     * - Focusing always succeeds (goes into locked, or PASSIVE_SCAN).
-     */
-
-    bool afTriggerStart = false;
-    bool afTriggerCancel = false;
-    switch (afTrigger) {
-        case ANDROID_CONTROL_AF_TRIGGER_IDLE:
+          case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+            // fall-through
+          case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+            mAfState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
             break;
-        case ANDROID_CONTROL_AF_TRIGGER_START:
-            afTriggerStart = true;
+        }
+      } else {
+        // At least one frame stays in INACTIVE
+        if (!afModeChanged) {
+          switch (afMode) {
+            case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+              // fall-through
+            case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+              mAfState = ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN;
+              break;
+          }
+        }
+      }
+      break;
+    case ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN:
+      /**
+       * When the AF trigger is activated, the algorithm should finish
+       * its PASSIVE_SCAN if active, and then transition into AF_FOCUSED
+       * or AF_NOT_FOCUSED as appropriate
+       */
+      if (afTriggerStart) {
+        // Randomly transition to focused or not focused
+        if (rand() % 3) {
+          mAfState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
+        } else {
+          mAfState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
+        }
+      }
+      /**
+       * When the AF trigger is not involved, the AF algorithm should
+       * start in INACTIVE state, and then transition into PASSIVE_SCAN
+       * and PASSIVE_FOCUSED states
+       */
+      else if (!afTriggerCancel) {
+        // Randomly transition to passive focus
+        if (rand() % 3 == 0) {
+          mAfState = ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED;
+        }
+      }
+
+      break;
+    case ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED:
+      if (afTriggerStart) {
+        // Randomly transition to focused or not focused
+        if (rand() % 3) {
+          mAfState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
+        } else {
+          mAfState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
+        }
+      }
+      // TODO: initiate passive scan (PASSIVE_SCAN)
+      break;
+    case ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN:
+      // Simulate AF sweep completing instantaneously
+
+      // Randomly transition to focused or not focused
+      if (rand() % 3) {
+        mAfState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
+      } else {
+        mAfState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
+      }
+      break;
+    case ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED:
+      if (afTriggerStart) {
+        switch (afMode) {
+          case ANDROID_CONTROL_AF_MODE_AUTO:
+            // fall-through
+          case ANDROID_CONTROL_AF_MODE_MACRO:
+            mAfState = ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN;
             break;
-        case ANDROID_CONTROL_AF_TRIGGER_CANCEL:
-            afTriggerCancel = true;
-            // Cancel trigger always transitions into INACTIVE
-            mAfState = ANDROID_CONTROL_AF_STATE_INACTIVE;
-
-            ALOGV("%s: AF State transition to STATE_INACTIVE", __FUNCTION__);
-
-            // Stay in 'inactive' until at least next frame
-            return OK;
-        default:
-            ALOGE("%s: Unknown af trigger value %d", __FUNCTION__, afTrigger);
-            return BAD_VALUE;
-    }
-
-    // If we get down here, we're either in an autofocus mode
-    //  or in a continuous focus mode (and no other modes)
-
-    int oldAfState = mAfState;
-    switch (mAfState) {
-        case ANDROID_CONTROL_AF_STATE_INACTIVE:
-            if (afTriggerStart) {
-                switch (afMode) {
-                    case ANDROID_CONTROL_AF_MODE_AUTO:
-                        // fall-through
-                    case ANDROID_CONTROL_AF_MODE_MACRO:
-                        mAfState = ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN;
-                        break;
-                    case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
-                        // fall-through
-                    case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
-                        mAfState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
-                        break;
-                }
-            } else {
-                // At least one frame stays in INACTIVE
-                if (!afModeChanged) {
-                    switch (afMode) {
-                        case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
-                            // fall-through
-                        case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
-                            mAfState = ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN;
-                            break;
-                    }
-                }
-            }
+          case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+            // fall-through
+          case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+            // continuous autofocus => trigger start has no effect
             break;
-        case ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN:
-            /**
-             * When the AF trigger is activated, the algorithm should finish
-             * its PASSIVE_SCAN if active, and then transition into AF_FOCUSED
-             * or AF_NOT_FOCUSED as appropriate
-             */
-            if (afTriggerStart) {
-                // Randomly transition to focused or not focused
-                if (rand() % 3) {
-                    mAfState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
-                } else {
-                    mAfState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
-                }
-            }
-            /**
-             * When the AF trigger is not involved, the AF algorithm should
-             * start in INACTIVE state, and then transition into PASSIVE_SCAN
-             * and PASSIVE_FOCUSED states
-             */
-            else if (!afTriggerCancel) {
-               // Randomly transition to passive focus
-                if (rand() % 3 == 0) {
-                    mAfState = ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED;
-                }
-            }
-
+        }
+      }
+      break;
+    case ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED:
+      if (afTriggerStart) {
+        switch (afMode) {
+          case ANDROID_CONTROL_AF_MODE_AUTO:
+            // fall-through
+          case ANDROID_CONTROL_AF_MODE_MACRO:
+            mAfState = ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN;
             break;
-        case ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED:
-            if (afTriggerStart) {
-                // Randomly transition to focused or not focused
-                if (rand() % 3) {
-                    mAfState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
-                } else {
-                    mAfState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
-                }
-            }
-            // TODO: initiate passive scan (PASSIVE_SCAN)
+          case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
+            // fall-through
+          case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
+            // continuous autofocus => trigger start has no effect
             break;
-        case ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN:
-            // Simulate AF sweep completing instantaneously
+        }
+      }
+      break;
+    default:
+      ALOGE("%s: Bad af state %d", __FUNCTION__, mAfState);
+  }
 
-            // Randomly transition to focused or not focused
-            if (rand() % 3) {
-                mAfState = ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED;
-            } else {
-                mAfState = ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED;
-            }
-            break;
-        case ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED:
-            if (afTriggerStart) {
-                switch (afMode) {
-                    case ANDROID_CONTROL_AF_MODE_AUTO:
-                        // fall-through
-                    case ANDROID_CONTROL_AF_MODE_MACRO:
-                        mAfState = ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN;
-                        break;
-                    case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
-                        // fall-through
-                    case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
-                        // continuous autofocus => trigger start has no effect
-                        break;
-                }
-            }
-            break;
-        case ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED:
-            if (afTriggerStart) {
-                switch (afMode) {
-                    case ANDROID_CONTROL_AF_MODE_AUTO:
-                        // fall-through
-                    case ANDROID_CONTROL_AF_MODE_MACRO:
-                        mAfState = ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN;
-                        break;
-                    case ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO:
-                        // fall-through
-                    case ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE:
-                        // continuous autofocus => trigger start has no effect
-                        break;
-                }
-            }
-            break;
-        default:
-            ALOGE("%s: Bad af state %d", __FUNCTION__, mAfState);
-    }
+  {
+    char afStateString[100] = {
+        0,
+    };
+    camera_metadata_enum_snprint(ANDROID_CONTROL_AF_STATE, oldAfState,
+                                 afStateString, sizeof(afStateString));
 
-    {
-        char afStateString[100] = {0,};
-        camera_metadata_enum_snprint(ANDROID_CONTROL_AF_STATE,
-                oldAfState,
-                afStateString,
-                sizeof(afStateString));
+    char afNewStateString[100] = {
+        0,
+    };
+    camera_metadata_enum_snprint(ANDROID_CONTROL_AF_STATE, mAfState,
+                                 afNewStateString, sizeof(afNewStateString));
+    ALOGVV("%s: AF state transitioned from %s to %s", __FUNCTION__,
+           afStateString, afNewStateString);
+  }
 
-        char afNewStateString[100] = {0,};
-        camera_metadata_enum_snprint(ANDROID_CONTROL_AF_STATE,
-                mAfState,
-                afNewStateString,
-                sizeof(afNewStateString));
-        ALOGVV("%s: AF state transitioned from %s to %s",
-              __FUNCTION__, afStateString, afNewStateString);
-    }
-
-
-    return OK;
+  return OK;
 }
 
 status_t EmulatedFakeCamera3::doFakeAWB(CameraMetadata &settings) {
-    camera_metadata_entry e;
+  camera_metadata_entry e;
 
-    e = settings.find(ANDROID_CONTROL_AWB_MODE);
-    if (e.count == 0 && hasCapability(BACKWARD_COMPATIBLE)) {
-        ALOGE("%s: No AWB mode entry!", __FUNCTION__);
-        return BAD_VALUE;
-    }
-    uint8_t awbMode = (e.count > 0) ? e.data.u8[0] : (uint8_t)ANDROID_CONTROL_AWB_MODE_AUTO;
+  e = settings.find(ANDROID_CONTROL_AWB_MODE);
+  if (e.count == 0 && hasCapability(BACKWARD_COMPATIBLE)) {
+    ALOGE("%s: No AWB mode entry!", __FUNCTION__);
+    return BAD_VALUE;
+  }
+  uint8_t awbMode =
+      (e.count > 0) ? e.data.u8[0] : (uint8_t)ANDROID_CONTROL_AWB_MODE_AUTO;
 
-    // TODO: Add white balance simulation
+  // TODO: Add white balance simulation
 
-    e = settings.find(ANDROID_CONTROL_AWB_LOCK);
-    bool awbLocked = (e.count > 0) ? (e.data.u8[0] == ANDROID_CONTROL_AWB_LOCK_ON) : false;
+  e = settings.find(ANDROID_CONTROL_AWB_LOCK);
+  bool awbLocked =
+      (e.count > 0) ? (e.data.u8[0] == ANDROID_CONTROL_AWB_LOCK_ON) : false;
 
-    switch (awbMode) {
-        case ANDROID_CONTROL_AWB_MODE_OFF:
-            mAwbState = ANDROID_CONTROL_AWB_STATE_INACTIVE;
-            break;
-        case ANDROID_CONTROL_AWB_MODE_AUTO:
-        case ANDROID_CONTROL_AWB_MODE_INCANDESCENT:
-        case ANDROID_CONTROL_AWB_MODE_FLUORESCENT:
-        case ANDROID_CONTROL_AWB_MODE_DAYLIGHT:
-        case ANDROID_CONTROL_AWB_MODE_SHADE:
-            // Always magically right, or locked
-            mAwbState = awbLocked ? ANDROID_CONTROL_AWB_STATE_LOCKED :
-                    ANDROID_CONTROL_AWB_STATE_CONVERGED;
-            break;
-        default:
-            ALOGE("%s: Emulator doesn't support AWB mode %d",
-                    __FUNCTION__, awbMode);
-            return BAD_VALUE;
-    }
+  switch (awbMode) {
+    case ANDROID_CONTROL_AWB_MODE_OFF:
+      mAwbState = ANDROID_CONTROL_AWB_STATE_INACTIVE;
+      break;
+    case ANDROID_CONTROL_AWB_MODE_AUTO:
+    case ANDROID_CONTROL_AWB_MODE_INCANDESCENT:
+    case ANDROID_CONTROL_AWB_MODE_FLUORESCENT:
+    case ANDROID_CONTROL_AWB_MODE_DAYLIGHT:
+    case ANDROID_CONTROL_AWB_MODE_SHADE:
+      // Always magically right, or locked
+      mAwbState = awbLocked ? ANDROID_CONTROL_AWB_STATE_LOCKED
+                            : ANDROID_CONTROL_AWB_STATE_CONVERGED;
+      break;
+    default:
+      ALOGE("%s: Emulator doesn't support AWB mode %d", __FUNCTION__, awbMode);
+      return BAD_VALUE;
+  }
 
-    return OK;
+  return OK;
 }
 
-
 void EmulatedFakeCamera3::update3A(CameraMetadata &settings) {
-    if (mAeMode != ANDROID_CONTROL_AE_MODE_OFF) {
-        settings.update(ANDROID_SENSOR_EXPOSURE_TIME,
-                &mAeCurrentExposureTime, 1);
-        settings.update(ANDROID_SENSOR_SENSITIVITY,
-                &mAeCurrentSensitivity, 1);
-    }
+  if (mAeMode != ANDROID_CONTROL_AE_MODE_OFF) {
+    settings.update(ANDROID_SENSOR_EXPOSURE_TIME, &mAeCurrentExposureTime, 1);
+    settings.update(ANDROID_SENSOR_SENSITIVITY, &mAeCurrentSensitivity, 1);
+  }
 
-    settings.update(ANDROID_CONTROL_AE_STATE,
-            &mAeState, 1);
-    settings.update(ANDROID_CONTROL_AF_STATE,
-            &mAfState, 1);
-    settings.update(ANDROID_CONTROL_AWB_STATE,
-            &mAwbState, 1);
+  settings.update(ANDROID_CONTROL_AE_STATE, &mAeState, 1);
+  settings.update(ANDROID_CONTROL_AF_STATE, &mAfState, 1);
+  settings.update(ANDROID_CONTROL_AWB_STATE, &mAwbState, 1);
 
-    uint8_t lensState;
-    switch (mAfState) {
-        case ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN:
-        case ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN:
-            lensState = ANDROID_LENS_STATE_MOVING;
-            break;
-        case ANDROID_CONTROL_AF_STATE_INACTIVE:
-        case ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED:
-        case ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED:
-        case ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED:
-        case ANDROID_CONTROL_AF_STATE_PASSIVE_UNFOCUSED:
-        default:
-            lensState = ANDROID_LENS_STATE_STATIONARY;
-            break;
-    }
-    settings.update(ANDROID_LENS_STATE, &lensState, 1);
-
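+  // Report the lens as moving while any AF scan is in progress, and as
+  // stationary in every other AF state.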
+  uint8_t lensState;
+  switch (mAfState) {
+    case ANDROID_CONTROL_AF_STATE_PASSIVE_SCAN:
+    case ANDROID_CONTROL_AF_STATE_ACTIVE_SCAN:
+      lensState = ANDROID_LENS_STATE_MOVING;
+      break;
+    case ANDROID_CONTROL_AF_STATE_INACTIVE:
+    case ANDROID_CONTROL_AF_STATE_PASSIVE_FOCUSED:
+    case ANDROID_CONTROL_AF_STATE_FOCUSED_LOCKED:
+    case ANDROID_CONTROL_AF_STATE_NOT_FOCUSED_LOCKED:
+    case ANDROID_CONTROL_AF_STATE_PASSIVE_UNFOCUSED:
+    default:
+      lensState = ANDROID_LENS_STATE_STATIONARY;
+      break;
+  }
+  settings.update(ANDROID_LENS_STATE, &lensState, 1);
 }
 
 void EmulatedFakeCamera3::signalReadoutIdle() {
-    Mutex::Autolock l(mLock);
-    // Need to chek isIdle again because waiting on mLock may have allowed
-    // something to be placed in the in-flight queue.
-    if (mStatus == STATUS_ACTIVE && mReadoutThread->isIdle()) {
-        ALOGV("Now idle");
-        mStatus = STATUS_READY;
-    }
+  Mutex::Autolock l(mLock);
+  // Need to check isIdle again because waiting on mLock may have allowed
+  // something to be placed in the in-flight queue.
+  if (mStatus == STATUS_ACTIVE && mReadoutThread->isIdle()) {
+    ALOGV("Now idle");
+    mStatus = STATUS_READY;
+  }
 }
 
 void EmulatedFakeCamera3::onSensorEvent(uint32_t frameNumber, Event e,
-        nsecs_t timestamp) {
-    switch(e) {
-        case Sensor::SensorListener::EXPOSURE_START: {
-            ALOGVV("%s: Frame %d: Sensor started exposure at %lld",
-                    __FUNCTION__, frameNumber, timestamp);
-            // Trigger shutter notify to framework
-            camera3_notify_msg_t msg;
-            msg.type = CAMERA3_MSG_SHUTTER;
-            msg.message.shutter.frame_number = frameNumber;
-            msg.message.shutter.timestamp = timestamp;
-            sendNotify(&msg);
-            break;
-        }
-        default:
-            ALOGW("%s: Unexpected sensor event %d at %" PRId64, __FUNCTION__,
-                    e, timestamp);
-            break;
+                                        nsecs_t timestamp) {
+  switch (e) {
+    case Sensor::SensorListener::EXPOSURE_START: {
+      ALOGVV("%s: Frame %d: Sensor started exposure at %lld", __FUNCTION__,
+             frameNumber, timestamp);
+      // Trigger shutter notify to framework
+      camera3_notify_msg_t msg;
+      msg.type = CAMERA3_MSG_SHUTTER;
+      msg.message.shutter.frame_number = frameNumber;
+      msg.message.shutter.timestamp = timestamp;
+      sendNotify(&msg);
+      break;
     }
+    default:
+      ALOGW("%s: Unexpected sensor event %d at %" PRId64, __FUNCTION__, e,
+            timestamp);
+      break;
+  }
 }
 
-EmulatedFakeCamera3::ReadoutThread::ReadoutThread(EmulatedFakeCamera3 *parent) :
-        mParent(parent), mJpegWaiting(false) {
-}
+EmulatedFakeCamera3::ReadoutThread::ReadoutThread(EmulatedFakeCamera3 *parent)
+    : mParent(parent), mJpegWaiting(false) {}
 
 EmulatedFakeCamera3::ReadoutThread::~ReadoutThread() {
-    for (List<Request>::iterator i = mInFlightQueue.begin();
-         i != mInFlightQueue.end(); i++) {
-        delete i->buffers;
-        delete i->sensorBuffers;
-    }
+  for (List<Request>::iterator i = mInFlightQueue.begin();
+       i != mInFlightQueue.end(); i++) {
+    delete i->buffers;
+    delete i->sensorBuffers;
+  }
 }
 
 void EmulatedFakeCamera3::ReadoutThread::queueCaptureRequest(const Request &r) {
-    Mutex::Autolock l(mLock);
+  Mutex::Autolock l(mLock);
 
-    mInFlightQueue.push_back(r);
-    mInFlightSignal.signal();
+  mInFlightQueue.push_back(r);
+  mInFlightSignal.signal();
 }
 
 bool EmulatedFakeCamera3::ReadoutThread::isIdle() {
-    Mutex::Autolock l(mLock);
-    return mInFlightQueue.empty() && !mThreadActive;
+  Mutex::Autolock l(mLock);
+  return mInFlightQueue.empty() && !mThreadActive;
 }
 
 status_t EmulatedFakeCamera3::ReadoutThread::waitForReadout() {
-    status_t res;
-    Mutex::Autolock l(mLock);
-    int loopCount = 0;
-    while (mInFlightQueue.size() >= kMaxQueueSize) {
-        res = mInFlightSignal.waitRelative(mLock, kWaitPerLoop);
-        if (res != OK && res != TIMED_OUT) {
-            ALOGE("%s: Error waiting for in-flight queue to shrink",
-                    __FUNCTION__);
-            return INVALID_OPERATION;
-        }
-        if (loopCount == kMaxWaitLoops) {
-            ALOGE("%s: Timed out waiting for in-flight queue to shrink",
-                    __FUNCTION__);
-            return TIMED_OUT;
-        }
-        loopCount++;
+  status_t res;
+  Mutex::Autolock l(mLock);
+  int loopCount = 0;
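+  // Poll until the in-flight queue drops below kMaxQueueSize, giving up after
+  // kMaxWaitLoops waits of kWaitPerLoop each.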
+  while (mInFlightQueue.size() >= kMaxQueueSize) {
+    res = mInFlightSignal.waitRelative(mLock, kWaitPerLoop);
+    if (res != OK && res != TIMED_OUT) {
+      ALOGE("%s: Error waiting for in-flight queue to shrink", __FUNCTION__);
+      return INVALID_OPERATION;
     }
-    return OK;
+    if (loopCount == kMaxWaitLoops) {
+      ALOGE("%s: Timed out waiting for in-flight queue to shrink",
+            __FUNCTION__);
+      return TIMED_OUT;
+    }
+    loopCount++;
+  }
+  return OK;
 }
 
 bool EmulatedFakeCamera3::ReadoutThread::threadLoop() {
-    status_t res;
+  status_t res;
 
-    ALOGVV("%s: ReadoutThread waiting for request", __FUNCTION__);
+  ALOGVV("%s: ReadoutThread waiting for request", __FUNCTION__);
 
-    // First wait for a request from the in-flight queue
+  // First wait for a request from the in-flight queue
 
-    if (mCurrentRequest.settings.isEmpty()) {
-        Mutex::Autolock l(mLock);
-        if (mInFlightQueue.empty()) {
-            res = mInFlightSignal.waitRelative(mLock, kWaitPerLoop);
-            if (res == TIMED_OUT) {
-                ALOGVV("%s: ReadoutThread: Timed out waiting for request",
-                        __FUNCTION__);
-                return true;
-            } else if (res != NO_ERROR) {
-                ALOGE("%s: Error waiting for capture requests: %d",
-                        __FUNCTION__, res);
-                return false;
-            }
-        }
-        mCurrentRequest.frameNumber = mInFlightQueue.begin()->frameNumber;
-        mCurrentRequest.settings.acquire(mInFlightQueue.begin()->settings);
-        mCurrentRequest.buffers = mInFlightQueue.begin()->buffers;
-        mCurrentRequest.sensorBuffers = mInFlightQueue.begin()->sensorBuffers;
-        mInFlightQueue.erase(mInFlightQueue.begin());
-        mInFlightSignal.signal();
-        mThreadActive = true;
-        ALOGVV("%s: Beginning readout of frame %d", __FUNCTION__,
-                mCurrentRequest.frameNumber);
-    }
-
-    // Then wait for it to be delivered from the sensor
-    ALOGVV("%s: ReadoutThread: Wait for frame to be delivered from sensor",
-            __FUNCTION__);
-
-    nsecs_t captureTime;
-    bool gotFrame =
-            mParent->mSensor->waitForNewFrame(kWaitPerLoop, &captureTime);
-    if (!gotFrame) {
-        ALOGVV("%s: ReadoutThread: Timed out waiting for sensor frame",
-                __FUNCTION__);
+  if (mCurrentRequest.settings.isEmpty()) {
+    Mutex::Autolock l(mLock);
+    if (mInFlightQueue.empty()) {
+      res = mInFlightSignal.waitRelative(mLock, kWaitPerLoop);
+      if (res == TIMED_OUT) {
+        ALOGVV("%s: ReadoutThread: Timed out waiting for request",
+               __FUNCTION__);
         return true;
+      } else if (res != NO_ERROR) {
+        ALOGE("%s: Error waiting for capture requests: %d", __FUNCTION__, res);
+        return false;
+      }
     }
+    mCurrentRequest.frameNumber = mInFlightQueue.begin()->frameNumber;
+    mCurrentRequest.settings.acquire(mInFlightQueue.begin()->settings);
+    mCurrentRequest.buffers = mInFlightQueue.begin()->buffers;
+    mCurrentRequest.sensorBuffers = mInFlightQueue.begin()->sensorBuffers;
+    mInFlightQueue.erase(mInFlightQueue.begin());
+    mInFlightSignal.signal();
+    mThreadActive = true;
+    ALOGVV("%s: Beginning readout of frame %d", __FUNCTION__,
+           mCurrentRequest.frameNumber);
+  }
 
-    ALOGVV("Sensor done with readout for frame %d, captured at %lld ",
-            mCurrentRequest.frameNumber, captureTime);
+  // Then wait for it to be delivered from the sensor
+  ALOGVV("%s: ReadoutThread: Wait for frame to be delivered from sensor",
+         __FUNCTION__);
 
-    // Check if we need to JPEG encode a buffer, and send it for async
-    // compression if so. Otherwise prepare the buffer for return.
-    bool needJpeg = false;
-    HalBufferVector::iterator buf = mCurrentRequest.buffers->begin();
-    while(buf != mCurrentRequest.buffers->end()) {
-        bool goodBuffer = true;
-        if ( buf->stream->format ==
-                HAL_PIXEL_FORMAT_BLOB && buf->stream->data_space != HAL_DATASPACE_DEPTH) {
-            Mutex::Autolock jl(mJpegLock);
-            if (mJpegWaiting) {
-                // This shouldn't happen, because processCaptureRequest should
-                // be stalling until JPEG compressor is free.
-                ALOGE("%s: Already processing a JPEG!", __FUNCTION__);
-                goodBuffer = false;
-            }
-            if (goodBuffer) {
-                // Compressor takes ownership of sensorBuffers here
-                res = mParent->mJpegCompressor->start(mCurrentRequest.sensorBuffers,
-                        this);
-                goodBuffer = (res == OK);
-            }
-            if (goodBuffer) {
-                needJpeg = true;
-
-                mJpegHalBuffer = *buf;
-                mJpegFrameNumber = mCurrentRequest.frameNumber;
-                mJpegWaiting = true;
-
-                mCurrentRequest.sensorBuffers = NULL;
-                buf = mCurrentRequest.buffers->erase(buf);
-
-                continue;
-            }
-            ALOGE("%s: Error compressing output buffer: %s (%d)",
-                        __FUNCTION__, strerror(-res), res);
-            // fallthrough for cleanup
-        }
-        GrallocModule::getInstance().unlock(*(buf->buffer));
-
-        buf->status = goodBuffer ? CAMERA3_BUFFER_STATUS_OK :
-                CAMERA3_BUFFER_STATUS_ERROR;
-        buf->acquire_fence = -1;
-        buf->release_fence = -1;
-
-        ++buf;
-    } // end while
-
-    // Construct result for all completed buffers and results
-
-    camera3_capture_result result;
-
-    if (mParent->hasCapability(BACKWARD_COMPATIBLE)) {
-        static const uint8_t sceneFlicker = ANDROID_STATISTICS_SCENE_FLICKER_NONE;
-        mCurrentRequest.settings.update(ANDROID_STATISTICS_SCENE_FLICKER,
-                &sceneFlicker, 1);
-
-        static const uint8_t flashState = ANDROID_FLASH_STATE_UNAVAILABLE;
-        mCurrentRequest.settings.update(ANDROID_FLASH_STATE,
-                &flashState, 1);
-
-        nsecs_t rollingShutterSkew = Sensor::kFrameDurationRange[0];
-        mCurrentRequest.settings.update(ANDROID_SENSOR_ROLLING_SHUTTER_SKEW,
-                &rollingShutterSkew, 1);
-
-        float focusRange[] = { 1.0f/5.0f, 0 }; // 5 m to infinity in focus
-        mCurrentRequest.settings.update(ANDROID_LENS_FOCUS_RANGE,
-                focusRange, sizeof(focusRange)/sizeof(float));
-    }
-
-    if (mParent->hasCapability(DEPTH_OUTPUT)) {
-        camera_metadata_entry_t entry;
-
-        find_camera_metadata_entry(mParent->mCameraInfo, ANDROID_LENS_POSE_TRANSLATION, &entry);
-        mCurrentRequest.settings.update(ANDROID_LENS_POSE_TRANSLATION,
-                entry.data.f, entry.count);
-
-        find_camera_metadata_entry(mParent->mCameraInfo, ANDROID_LENS_POSE_ROTATION, &entry);
-        mCurrentRequest.settings.update(ANDROID_LENS_POSE_ROTATION,
-                entry.data.f, entry.count);
-
-        find_camera_metadata_entry(mParent->mCameraInfo, ANDROID_LENS_INTRINSIC_CALIBRATION, &entry);
-        mCurrentRequest.settings.update(ANDROID_LENS_INTRINSIC_CALIBRATION,
-                entry.data.f, entry.count);
-
-        find_camera_metadata_entry(mParent->mCameraInfo, ANDROID_LENS_RADIAL_DISTORTION, &entry);
-        mCurrentRequest.settings.update(ANDROID_LENS_RADIAL_DISTORTION,
-                entry.data.f, entry.count);
-    }
-
-    mCurrentRequest.settings.update(ANDROID_SENSOR_TIMESTAMP,
-            &captureTime, 1);
-
-
-    // JPEGs take a stage longer
-    const uint8_t pipelineDepth = needJpeg ? kMaxBufferCount : kMaxBufferCount - 1;
-    mCurrentRequest.settings.update(ANDROID_REQUEST_PIPELINE_DEPTH,
-            &pipelineDepth, 1);
-
-    result.frame_number = mCurrentRequest.frameNumber;
-    result.result = mCurrentRequest.settings.getAndLock();
-    result.num_output_buffers = mCurrentRequest.buffers->size();
-    result.output_buffers = mCurrentRequest.buffers->array();
-    result.input_buffer = nullptr;
-    result.partial_result = 1;
-
-    // Go idle if queue is empty, before sending result
-    bool signalIdle = false;
-    {
-        Mutex::Autolock l(mLock);
-        if (mInFlightQueue.empty()) {
-            mThreadActive = false;
-            signalIdle = true;
-        }
-    }
-    if (signalIdle) mParent->signalReadoutIdle();
-
-    // Send it off to the framework
-    ALOGVV("%s: ReadoutThread: Send result to framework",
-            __FUNCTION__);
-    mParent->sendCaptureResult(&result);
-
-    // Clean up
-    mCurrentRequest.settings.unlock(result.result);
-
-    delete mCurrentRequest.buffers;
-    mCurrentRequest.buffers = NULL;
-    if (!needJpeg) {
-        delete mCurrentRequest.sensorBuffers;
-        mCurrentRequest.sensorBuffers = NULL;
-    }
-    mCurrentRequest.settings.clear();
-
+  nsecs_t captureTime;
+  bool gotFrame = mParent->mSensor->waitForNewFrame(kWaitPerLoop, &captureTime);
+  if (!gotFrame) {
+    ALOGVV("%s: ReadoutThread: Timed out waiting for sensor frame",
+           __FUNCTION__);
     return true;
+  }
+
+  ALOGVV("Sensor done with readout for frame %d, captured at %lld ",
+         mCurrentRequest.frameNumber, captureTime);
+
+  // Check if we need to JPEG encode a buffer, and send it for async
+  // compression if so. Otherwise prepare the buffer for return.
+  bool needJpeg = false;
+  HalBufferVector::iterator buf = mCurrentRequest.buffers->begin();
+  while (buf != mCurrentRequest.buffers->end()) {
+    bool goodBuffer = true;
+    if (buf->stream->format == HAL_PIXEL_FORMAT_BLOB &&
+        buf->stream->data_space != HAL_DATASPACE_DEPTH) {
+      Mutex::Autolock jl(mJpegLock);
+      if (mJpegWaiting) {
+        // This shouldn't happen, because processCaptureRequest should
+        // be stalling until JPEG compressor is free.
+        ALOGE("%s: Already processing a JPEG!", __FUNCTION__);
+        goodBuffer = false;
+      }
+      if (goodBuffer) {
+        // Compressor takes ownership of sensorBuffers here
+        res = mParent->mJpegCompressor->start(mCurrentRequest.sensorBuffers,
+                                              this);
+        goodBuffer = (res == OK);
+      }
+      if (goodBuffer) {
+        needJpeg = true;
+
+        mJpegHalBuffer = *buf;
+        mJpegFrameNumber = mCurrentRequest.frameNumber;
+        mJpegWaiting = true;
+
+        mCurrentRequest.sensorBuffers = NULL;
+        buf = mCurrentRequest.buffers->erase(buf);
+
+        continue;
+      }
+      ALOGE("%s: Error compressing output buffer: %s (%d)", __FUNCTION__,
+            strerror(-res), res);
+      // fallthrough for cleanup
+    }
+    GrallocModule::getInstance().unlock(*(buf->buffer));
+
+    buf->status =
+        goodBuffer ? CAMERA3_BUFFER_STATUS_OK : CAMERA3_BUFFER_STATUS_ERROR;
+    buf->acquire_fence = -1;
+    buf->release_fence = -1;
+
+    ++buf;
+  }  // end while
+
+  // Construct result for all completed buffers and results
+
+  camera3_capture_result result;
+
+  if (mParent->hasCapability(BACKWARD_COMPATIBLE)) {
+    static const uint8_t sceneFlicker = ANDROID_STATISTICS_SCENE_FLICKER_NONE;
+    mCurrentRequest.settings.update(ANDROID_STATISTICS_SCENE_FLICKER,
+                                    &sceneFlicker, 1);
+
+    static const uint8_t flashState = ANDROID_FLASH_STATE_UNAVAILABLE;
+    mCurrentRequest.settings.update(ANDROID_FLASH_STATE, &flashState, 1);
+
+    nsecs_t rollingShutterSkew = Sensor::kFrameDurationRange[0];
+    mCurrentRequest.settings.update(ANDROID_SENSOR_ROLLING_SHUTTER_SKEW,
+                                    &rollingShutterSkew, 1);
+
+    float focusRange[] = {1.0f / 5.0f, 0};  // 5 m to infinity in focus
+    mCurrentRequest.settings.update(ANDROID_LENS_FOCUS_RANGE, focusRange,
+                                    sizeof(focusRange) / sizeof(float));
+  }
+
+  if (mParent->hasCapability(DEPTH_OUTPUT)) {
+    camera_metadata_entry_t entry;
+
+    find_camera_metadata_entry(mParent->mCameraInfo,
+                               ANDROID_LENS_POSE_TRANSLATION, &entry);
+    mCurrentRequest.settings.update(ANDROID_LENS_POSE_TRANSLATION, entry.data.f,
+                                    entry.count);
+
+    find_camera_metadata_entry(mParent->mCameraInfo, ANDROID_LENS_POSE_ROTATION,
+                               &entry);
+    mCurrentRequest.settings.update(ANDROID_LENS_POSE_ROTATION, entry.data.f,
+                                    entry.count);
+
+    find_camera_metadata_entry(mParent->mCameraInfo,
+                               ANDROID_LENS_INTRINSIC_CALIBRATION, &entry);
+    mCurrentRequest.settings.update(ANDROID_LENS_INTRINSIC_CALIBRATION,
+                                    entry.data.f, entry.count);
+
+    find_camera_metadata_entry(mParent->mCameraInfo,
+                               ANDROID_LENS_RADIAL_DISTORTION, &entry);
+    mCurrentRequest.settings.update(ANDROID_LENS_RADIAL_DISTORTION,
+                                    entry.data.f, entry.count);
+  }
+
+  mCurrentRequest.settings.update(ANDROID_SENSOR_TIMESTAMP, &captureTime, 1);
+
+  // JPEGs take a stage longer
+  const uint8_t pipelineDepth =
+      needJpeg ? kMaxBufferCount : kMaxBufferCount - 1;
+  mCurrentRequest.settings.update(ANDROID_REQUEST_PIPELINE_DEPTH,
+                                  &pipelineDepth, 1);
+
+  result.frame_number = mCurrentRequest.frameNumber;
+  result.result = mCurrentRequest.settings.getAndLock();
+  result.num_output_buffers = mCurrentRequest.buffers->size();
+  result.output_buffers = mCurrentRequest.buffers->array();
+  result.input_buffer = nullptr;
+  result.partial_result = 1;
+
+  // Go idle if queue is empty, before sending result
+  bool signalIdle = false;
+  {
+    Mutex::Autolock l(mLock);
+    if (mInFlightQueue.empty()) {
+      mThreadActive = false;
+      signalIdle = true;
+    }
+  }
+  if (signalIdle) mParent->signalReadoutIdle();
+
+  // Send it off to the framework
+  ALOGVV("%s: ReadoutThread: Send result to framework", __FUNCTION__);
+  mParent->sendCaptureResult(&result);
+
+  // Clean up
+  mCurrentRequest.settings.unlock(result.result);
+
+  delete mCurrentRequest.buffers;
+  mCurrentRequest.buffers = NULL;
+  if (!needJpeg) {
+    delete mCurrentRequest.sensorBuffers;
+    mCurrentRequest.sensorBuffers = NULL;
+  }
+  mCurrentRequest.settings.clear();
+
+  return true;
 }
 
 void EmulatedFakeCamera3::ReadoutThread::onJpegDone(
-        const StreamBuffer &jpegBuffer, bool success) {
-    Mutex::Autolock jl(mJpegLock);
+    const StreamBuffer &jpegBuffer, bool success) {
+  Mutex::Autolock jl(mJpegLock);
 
-    GrallocModule::getInstance().unlock(*(jpegBuffer.buffer));
+  GrallocModule::getInstance().unlock(*(jpegBuffer.buffer));
 
-    mJpegHalBuffer.status = success ?
-            CAMERA3_BUFFER_STATUS_OK : CAMERA3_BUFFER_STATUS_ERROR;
-    mJpegHalBuffer.acquire_fence = -1;
-    mJpegHalBuffer.release_fence = -1;
-    mJpegWaiting = false;
+  mJpegHalBuffer.status =
+      success ? CAMERA3_BUFFER_STATUS_OK : CAMERA3_BUFFER_STATUS_ERROR;
+  mJpegHalBuffer.acquire_fence = -1;
+  mJpegHalBuffer.release_fence = -1;
+  mJpegWaiting = false;
 
-    camera3_capture_result result;
+  camera3_capture_result result;
 
-    result.frame_number = mJpegFrameNumber;
-    result.result = NULL;
-    result.num_output_buffers = 1;
-    result.output_buffers = &mJpegHalBuffer;
-    result.input_buffer = nullptr;
-    result.partial_result = 0;
+  result.frame_number = mJpegFrameNumber;
+  result.result = NULL;
+  result.num_output_buffers = 1;
+  result.output_buffers = &mJpegHalBuffer;
+  result.input_buffer = nullptr;
+  result.partial_result = 0;
 
-    if (!success) {
-        ALOGE("%s: Compression failure, returning error state buffer to"
-                " framework", __FUNCTION__);
-    } else {
-        ALOGV("%s: Compression complete, returning buffer to framework",
-                __FUNCTION__);
-    }
+  if (!success) {
+    ALOGE(
+        "%s: Compression failure, returning error state buffer to"
+        " framework",
+        __FUNCTION__);
+  } else {
+    ALOGV("%s: Compression complete, returning buffer to framework",
+          __FUNCTION__);
+  }
 
-    mParent->sendCaptureResult(&result);
+  mParent->sendCaptureResult(&result);
 }
 
 void EmulatedFakeCamera3::ReadoutThread::onJpegInputDone(
-        const StreamBuffer &inputBuffer) {
-    // Should never get here, since the input buffer has to be returned
-    // by end of processCaptureRequest
-    ALOGE("%s: Unexpected input buffer from JPEG compressor!", __FUNCTION__);
+    const StreamBuffer &inputBuffer) {
+  // Should never get here, since the input buffer has to be returned
+  // by end of processCaptureRequest
+  ALOGE("%s: Unexpected input buffer from JPEG compressor!", __FUNCTION__);
 }
 
-
-}; // namespace android
+}  // namespace android
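
The readout path above ends every request the same way: lock the metadata, fill in a camera3_capture_result, hand it to the framework, then unlock. The following is a minimal standalone sketch of that handoff, assuming Android's CameraMetadata and camera3 types; dispatchResult() and sendResult() are hypothetical names, with sendResult() standing in for the framework's process_capture_result callback.

#include <camera/CameraMetadata.h>
#include <hardware/camera3.h>

// Hypothetical hook standing in for camera3_callback_ops::process_capture_result.
void sendResult(const camera3_capture_result* result);

void dispatchResult(uint32_t frameNumber, android::CameraMetadata& settings,
                    const camera3_stream_buffer* buffers, size_t count) {
  camera3_capture_result result = {};
  result.frame_number = frameNumber;
  // getAndLock() hands out a read-only view of the metadata; it must be
  // balanced by unlock() before the CameraMetadata object is touched again.
  result.result = settings.getAndLock();
  result.num_output_buffers = static_cast<uint32_t>(count);
  result.output_buffers = buffers;
  result.input_buffer = nullptr;
  result.partial_result = 1;  // all metadata delivered in a single chunk

  sendResult(&result);

  settings.unlock(result.result);
}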
diff --git a/guest/hals/camera/EmulatedFakeCamera3.h b/guest/hals/camera/EmulatedFakeCamera3.h
index f0cf68f..5995478 100644
--- a/guest/hals/camera/EmulatedFakeCamera3.h
+++ b/guest/hals/camera/EmulatedFakeCamera3.h
@@ -23,14 +23,14 @@
 * interface.
  */
 
-#include "EmulatedCamera3.h"
-#include "fake-pipeline2/Base.h"
-#include "fake-pipeline2/Sensor.h"
-#include "fake-pipeline2/JpegCompressor.h"
 #include <camera/CameraMetadata.h>
-#include <utils/SortedVector.h>
 #include <utils/List.h>
 #include <utils/Mutex.h>
+#include <utils/SortedVector.h>
+#include "EmulatedCamera3.h"
+#include "fake-pipeline2/Base.h"
+#include "fake-pipeline2/JpegCompressor.h"
+#include "fake-pipeline2/Sensor.h"
 
 namespace android {
 
@@ -44,251 +44,244 @@
  * response to hw_module_methods_t::open, and camera_device::close callbacks.
  */
 class EmulatedFakeCamera3 : public EmulatedCamera3,
-        private Sensor::SensorListener {
-public:
+                            private Sensor::SensorListener {
+ public:
+  EmulatedFakeCamera3(int cameraId, bool facingBack,
+                      struct hw_module_t *module);
 
-    EmulatedFakeCamera3(int cameraId, bool facingBack,
-            struct hw_module_t* module);
+  virtual ~EmulatedFakeCamera3();
 
-    virtual ~EmulatedFakeCamera3();
+  /****************************************************************************
+   * EmulatedCamera3 virtual overrides
+   ***************************************************************************/
 
-    /****************************************************************************
-     * EmulatedCamera3 virtual overrides
-     ***************************************************************************/
+ public:
+  virtual status_t Initialize(const cvd::CameraDefinition &params);
 
-public:
+  /****************************************************************************
+   * Camera module API and generic hardware device API implementation
+   ***************************************************************************/
 
-    virtual status_t Initialize(const cvd::CameraDefinition& params);
+ public:
+  virtual status_t connectCamera(hw_device_t **device);
 
-    /****************************************************************************
-     * Camera module API and generic hardware device API implementation
-     ***************************************************************************/
+  virtual status_t closeCamera();
 
-public:
-    virtual status_t connectCamera(hw_device_t** device);
+  virtual status_t getCameraInfo(struct camera_info *info);
 
-    virtual status_t closeCamera();
+  virtual status_t setTorchMode(bool enabled);
 
-    virtual status_t getCameraInfo(struct camera_info *info);
+  /****************************************************************************
+   * EmulatedCamera3 abstract API implementation
+   ***************************************************************************/
 
-    virtual status_t setTorchMode(bool enabled);
+ protected:
+  virtual status_t configureStreams(camera3_stream_configuration *streamList);
 
-    /****************************************************************************
-     * EmulatedCamera3 abstract API implementation
-     ***************************************************************************/
+  virtual status_t registerStreamBuffers(
+      const camera3_stream_buffer_set *bufferSet);
 
-protected:
+  virtual const camera_metadata_t *constructDefaultRequestSettings(int type);
 
-    virtual status_t configureStreams(
-        camera3_stream_configuration *streamList);
+  virtual status_t processCaptureRequest(camera3_capture_request *request);
 
-    virtual status_t registerStreamBuffers(
-        const camera3_stream_buffer_set *bufferSet) ;
+  virtual status_t flush();
 
-    virtual const camera_metadata_t* constructDefaultRequestSettings(
-        int type);
+  /** Debug methods */
 
-    virtual status_t processCaptureRequest(camera3_capture_request *request);
+  virtual void dump(int fd);
 
-    virtual status_t flush();
+ private:
+  /**
+   * Get the requested capability set for this camera
+   */
+  status_t getCameraCapabilities();
 
-    /** Debug methods */
+  bool hasCapability(AvailableCapabilities cap);
 
-    virtual void dump(int fd);
+  /**
+   * Build the static info metadata buffer for this device
+   */
+  status_t constructStaticInfo(const cvd::CameraDefinition &params);
 
-private:
+  /**
+   * Run the fake 3A algorithms as needed. May override/modify settings
+   * values.
+   */
+  status_t process3A(CameraMetadata &settings);
 
-    /**
-     * Get the requested capability set for this camera
-     */
-    status_t getCameraCapabilities();
+  status_t doFakeAE(CameraMetadata &settings);
+  status_t doFakeAF(CameraMetadata &settings);
+  status_t doFakeAWB(CameraMetadata &settings);
+  void update3A(CameraMetadata &settings);
 
-    bool hasCapability(AvailableCapabilities cap);
+  /** Signal from readout thread that it doesn't have anything to do */
+  void signalReadoutIdle();
 
-    /**
-     * Build the static info metadata buffer for this device
-     */
-    status_t constructStaticInfo(const cvd::CameraDefinition& params);
+  /** Handle interrupt events from the sensor */
+  void onSensorEvent(uint32_t frameNumber, Event e, nsecs_t timestamp);
 
-    /**
-     * Run the fake 3A algorithms as needed. May override/modify settings
-     * values.
-     */
-    status_t process3A(CameraMetadata &settings);
+  /****************************************************************************
+   * Static configuration information
+   ***************************************************************************/
+ private:
+  static const uint32_t kMaxRawStreamCount = 1;
+  static const uint32_t kMaxProcessedStreamCount = 3;
+  static const uint32_t kMaxJpegStreamCount = 1;
+  static const uint32_t kMaxReprocessStreamCount = 2;
+  static const uint32_t kMaxBufferCount = 4;
+  // We need a positive stream ID to distinguish external buffers from
+  // sensor-generated buffers which use a nonpositive ID. Otherwise, HAL3 has
+  // no concept of a stream id.
+  static const uint32_t kGenericStreamId = 1;
+  static const int32_t kAvailableFormats[];
 
-    status_t doFakeAE(CameraMetadata &settings);
-    status_t doFakeAF(CameraMetadata &settings);
-    status_t doFakeAWB(CameraMetadata &settings);
-    void     update3A(CameraMetadata &settings);
+  static const int64_t kSyncWaitTimeout = 10000000;   // 10 ms
+  static const int32_t kMaxSyncTimeoutCount = 1000;   // 1000 kSyncWaitTimeouts
+  static const uint32_t kFenceTimeoutMs = 2000;       // 2 s
+  static const nsecs_t kJpegTimeoutNs = 5000000000L;  // 5 s
 
-    /** Signal from readout thread that it doesn't have anything to do */
-    void     signalReadoutIdle();
+  /****************************************************************************
+   * Data members.
+   ***************************************************************************/
 
-    /** Handle interrupt events from the sensor */
-    void     onSensorEvent(uint32_t frameNumber, Event e, nsecs_t timestamp);
+  /* HAL interface serialization lock. */
+  Mutex mLock;
 
-    /****************************************************************************
-     * Static configuration information
-     ***************************************************************************/
-private:
-    static const uint32_t kMaxRawStreamCount = 1;
-    static const uint32_t kMaxProcessedStreamCount = 3;
-    static const uint32_t kMaxJpegStreamCount = 1;
-    static const uint32_t kMaxReprocessStreamCount = 2;
-    static const uint32_t kMaxBufferCount = 4;
-    // We need a positive stream ID to distinguish external buffers from
-    // sensor-generated buffers which use a nonpositive ID. Otherwise, HAL3 has
-    // no concept of a stream id.
-    static const uint32_t kGenericStreamId = 1;
-    static const int32_t  kAvailableFormats[];
+  /* Facing back (true) or front (false) switch. */
+  bool mFacingBack;
+  int32_t mSensorWidth;
+  int32_t mSensorHeight;
 
-    static const int64_t  kSyncWaitTimeout     = 10000000; // 10 ms
-    static const int32_t  kMaxSyncTimeoutCount = 1000; // 1000 kSyncWaitTimeouts
-    static const uint32_t kFenceTimeoutMs      = 2000; // 2 s
-    static const nsecs_t  kJpegTimeoutNs       = 5000000000l; // 5 s
+  SortedVector<AvailableCapabilities> mCapabilities;
 
-    /****************************************************************************
-     * Data members.
-     ***************************************************************************/
+  /**
+   * Cache for default templates. Once one is requested, the pointer must be
+   * valid at least until close() is called on the device
+   */
+  camera_metadata_t *mDefaultTemplates[CAMERA3_TEMPLATE_COUNT];
 
-    /* HAL interface serialization lock. */
-    Mutex              mLock;
+  /**
+   * Private stream information, stored in camera3_stream_t->priv.
+   */
+  struct PrivateStreamInfo {
+    bool alive;
+  };
 
-    /* Facing back (true) or front (false) switch. */
-    bool               mFacingBack;
-    int32_t            mSensorWidth;
-    int32_t            mSensorHeight;
+  // Shortcut to the input stream
+  camera3_stream_t *mInputStream;
 
-    SortedVector<AvailableCapabilities> mCapabilities;
+  typedef List<camera3_stream_t *> StreamList;
+  typedef List<camera3_stream_t *>::iterator StreamIterator;
+  typedef Vector<camera3_stream_buffer> HalBufferVector;
 
-    /**
-     * Cache for default templates. Once one is requested, the pointer must be
-     * valid at least until close() is called on the device
-     */
-    camera_metadata_t *mDefaultTemplates[CAMERA3_TEMPLATE_COUNT];
+  // All streams, including input stream
+  StreamList mStreams;
 
-    /**
-     * Private stream information, stored in camera3_stream_t->priv.
-     */
-    struct PrivateStreamInfo {
-        bool alive;
+  // Cached settings from latest submitted request
+  CameraMetadata mPrevSettings;
+
+  /** Fake hardware interfaces */
+  sp<Sensor> mSensor;
+  sp<JpegCompressor> mJpegCompressor;
+  friend class JpegCompressor;
+
+  /** Processing thread for sending out results */
+
+  class ReadoutThread : public Thread, private JpegCompressor::JpegListener {
+   public:
+    ReadoutThread(EmulatedFakeCamera3 *parent);
+    ~ReadoutThread();
+
+    struct Request {
+      uint32_t frameNumber;
+      CameraMetadata settings;
+      HalBufferVector *buffers;
+      Buffers *sensorBuffers;
     };
 
-    // Shortcut to the input stream
-    camera3_stream_t*  mInputStream;
+    /**
+     * Interface to parent class
+     */
 
-    typedef List<camera3_stream_t*>           StreamList;
-    typedef List<camera3_stream_t*>::iterator StreamIterator;
-    typedef Vector<camera3_stream_buffer>     HalBufferVector;
+    // Place request in the in-flight queue to wait for sensor capture
+    void queueCaptureRequest(const Request &r);
 
-    // All streams, including input stream
-    StreamList         mStreams;
+    // Test if the readout thread is idle (no in-flight requests, not
+    // currently reading out anything)
+    bool isIdle();
 
-    // Cached settings from latest submitted request
-    CameraMetadata     mPrevSettings;
+    // Wait until isIdle is true
+    status_t waitForReadout();
 
-    /** Fake hardware interfaces */
-    sp<Sensor>         mSensor;
-    sp<JpegCompressor> mJpegCompressor;
-    friend class       JpegCompressor;
+   private:
+    static const nsecs_t kWaitPerLoop = 10000000L;  // 10 ms
+    static const nsecs_t kMaxWaitLoops = 1000;
+    static const size_t kMaxQueueSize = 2;
 
-    /** Processing thread for sending out results */
+    EmulatedFakeCamera3 *mParent;
+    Mutex mLock;
 
-    class ReadoutThread : public Thread, private JpegCompressor::JpegListener {
-      public:
-        ReadoutThread(EmulatedFakeCamera3 *parent);
-        ~ReadoutThread();
+    List<Request> mInFlightQueue;
+    Condition mInFlightSignal;
+    bool mThreadActive;
 
-        struct Request {
-            uint32_t         frameNumber;
-            CameraMetadata   settings;
-            HalBufferVector *buffers;
-            Buffers         *sensorBuffers;
-        };
+    virtual bool threadLoop();
 
-        /**
-         * Interface to parent class
-         */
+    // Only accessed by threadLoop
 
-        // Place request in the in-flight queue to wait for sensor capture
-        void     queueCaptureRequest(const Request &r);
+    Request mCurrentRequest;
 
-        // Test if the readout thread is idle (no in-flight requests, not
-        // currently reading out anything
-        bool     isIdle();
+    // Jpeg completion callbacks
 
-        // Wait until isIdle is true
-        status_t waitForReadout();
+    Mutex mJpegLock;
+    bool mJpegWaiting;
+    camera3_stream_buffer mJpegHalBuffer;
+    uint32_t mJpegFrameNumber;
+    virtual void onJpegDone(const StreamBuffer &jpegBuffer, bool success);
+    virtual void onJpegInputDone(const StreamBuffer &inputBuffer);
+  };
 
-      private:
-        static const nsecs_t kWaitPerLoop  = 10000000L; // 10 ms
-        static const nsecs_t kMaxWaitLoops = 1000;
-        static const size_t  kMaxQueueSize = 2;
+  sp<ReadoutThread> mReadoutThread;
 
-        EmulatedFakeCamera3 *mParent;
-        Mutex mLock;
+  /** Fake 3A constants */
 
-        List<Request> mInFlightQueue;
-        Condition     mInFlightSignal;
-        bool          mThreadActive;
+  static const nsecs_t kNormalExposureTime;
+  static const nsecs_t kFacePriorityExposureTime;
+  static const int kNormalSensitivity;
+  static const int kFacePrioritySensitivity;
+  // Rate of converging AE to new target value, as fraction of difference
+  // between current and target value.
+  static const float kExposureTrackRate;
+  // Minimum duration for precapture state. May be longer if slow to converge
+  // to target exposure
+  static const int kPrecaptureMinFrames;
+  // How often to restart AE 'scanning'
+  static const int kStableAeMaxFrames;
+  // Maximum stop below 'normal' exposure time that we'll wander to while
+  // pretending to converge AE. In powers of 2. (-2 == 1/4 as bright)
+  static const float kExposureWanderMin;
+  // Maximum stop above 'normal' exposure time that we'll wander to while
+  // pretending to converge AE. In powers of 2. (2 == 4x as bright)
+  static const float kExposureWanderMax;
 
-        virtual bool threadLoop();
+  /** Fake 3A state */
 
-        // Only accessed by threadLoop
+  uint8_t mControlMode;
+  bool mFacePriority;
+  uint8_t mAeState;
+  uint8_t mAfState;
+  uint8_t mAwbState;
+  uint8_t mAeMode;
+  uint8_t mAfMode;
+  uint8_t mAwbMode;
 
-        Request mCurrentRequest;
-
-        // Jpeg completion callbacks
-
-        Mutex                 mJpegLock;
-        bool                  mJpegWaiting;
-        camera3_stream_buffer mJpegHalBuffer;
-        uint32_t              mJpegFrameNumber;
-        virtual void onJpegDone(const StreamBuffer &jpegBuffer, bool success);
-        virtual void onJpegInputDone(const StreamBuffer &inputBuffer);
-    };
-
-    sp<ReadoutThread> mReadoutThread;
-
-    /** Fake 3A constants */
-
-    static const nsecs_t kNormalExposureTime;
-    static const nsecs_t kFacePriorityExposureTime;
-    static const int     kNormalSensitivity;
-    static const int     kFacePrioritySensitivity;
-    // Rate of converging AE to new target value, as fraction of difference between
-    // current and target value.
-    static const float   kExposureTrackRate;
-    // Minimum duration for precapture state. May be longer if slow to converge
-    // to target exposure
-    static const int     kPrecaptureMinFrames;
-    // How often to restart AE 'scanning'
-    static const int     kStableAeMaxFrames;
-    // Maximum stop below 'normal' exposure time that we'll wander to while
-    // pretending to converge AE. In powers of 2. (-2 == 1/4 as bright)
-    static const float   kExposureWanderMin;
-    // Maximum stop above 'normal' exposure time that we'll wander to while
-    // pretending to converge AE. In powers of 2. (2 == 4x as bright)
-    static const float   kExposureWanderMax;
-
-    /** Fake 3A state */
-
-    uint8_t mControlMode;
-    bool    mFacePriority;
-    uint8_t mAeState;
-    uint8_t mAfState;
-    uint8_t mAwbState;
-    uint8_t mAeMode;
-    uint8_t mAfMode;
-    uint8_t mAwbMode;
-
-    int     mAeCounter;
-    nsecs_t mAeCurrentExposureTime;
-    nsecs_t mAeTargetExposureTime;
-    int     mAeCurrentSensitivity;
-
+  int mAeCounter;
+  nsecs_t mAeCurrentExposureTime;
+  nsecs_t mAeTargetExposureTime;
+  int mAeCurrentSensitivity;
 };
 
-} // namespace android
+}  // namespace android
 
-#endif // HW_EMULATOR_CAMERA_EMULATED_CAMERA3_H
+#endif  // HW_EMULATOR_CAMERA_EMULATED_CAMERA3_H
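
The ReadoutThread declared above couples a bounded in-flight queue (kMaxQueueSize) with an idle handshake (isIdle()/waitForReadout()). Below is a standalone sketch of that pattern using standard C++ primitives instead of Android's Mutex/Condition; the class and method names are illustrative. Note the real thread also tracks mThreadActive so a request still being read out counts as busy, whereas this sketch treats an empty queue as idle.

#include <condition_variable>
#include <deque>
#include <mutex>

// Bounded producer/consumer queue with an idle notification, mirroring the
// queueCaptureRequest()/isIdle()/waitForReadout() trio above.
template <typename Request>
class InFlightQueue {
 public:
  explicit InFlightQueue(size_t maxSize) : mMaxSize(maxSize) {}

  // Producer side: blocks while the queue is full, then enqueues.
  void push(Request r) {
    std::unique_lock<std::mutex> lock(mMutex);
    mNotFull.wait(lock, [this] { return mQueue.size() < mMaxSize; });
    mQueue.push_back(std::move(r));
    mChanged.notify_all();
  }

  // Consumer side: blocks until a request is available, then dequeues it.
  Request pop() {
    std::unique_lock<std::mutex> lock(mMutex);
    mChanged.wait(lock, [this] { return !mQueue.empty(); });
    Request r = std::move(mQueue.front());
    mQueue.pop_front();
    if (mQueue.empty()) mChanged.notify_all();  // wake idle waiters
    mNotFull.notify_all();
    return r;
  }

  bool isIdle() {
    std::lock_guard<std::mutex> lock(mMutex);
    return mQueue.empty();
  }

  // Blocks until the queue has drained.
  void waitForIdle() {
    std::unique_lock<std::mutex> lock(mMutex);
    mChanged.wait(lock, [this] { return mQueue.empty(); });
  }

 private:
  const size_t mMaxSize;
  std::mutex mMutex;
  std::condition_variable mChanged;
  std::condition_variable mNotFull;
  std::deque<Request> mQueue;
};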
diff --git a/guest/hals/camera/EmulatedFakeCameraDevice.cpp b/guest/hals/camera/EmulatedFakeCameraDevice.cpp
index 8043add..088e7c6 100644
--- a/guest/hals/camera/EmulatedFakeCameraDevice.cpp
+++ b/guest/hals/camera/EmulatedFakeCameraDevice.cpp
@@ -21,13 +21,14 @@
 
 #define LOG_NDEBUG 0
 #define LOG_TAG "EmulatedCamera_FakeDevice"
+#include "EmulatedFakeCameraDevice.h"
 #include <cutils/log.h>
 #include "EmulatedFakeCamera.h"
-#include "EmulatedFakeCameraDevice.h"
 
 namespace android {
 
-EmulatedFakeCameraDevice::EmulatedFakeCameraDevice(EmulatedFakeCamera* camera_hal)
+EmulatedFakeCameraDevice::EmulatedFakeCameraDevice(
+    EmulatedFakeCamera* camera_hal)
     : EmulatedCameraDevice(camera_hal),
       mBlackYUV(kBlack32),
       mWhiteYUV(kWhite32),
@@ -39,400 +40,386 @@
       mCheckY(0),
       mCcounter(0)
 #if EFCD_ROTATE_FRAME
-      , mLastRotatedAt(0),
-        mCurrentFrameType(0),
-        mCurrentColor(&mWhiteYUV)
+      ,
+      mLastRotatedAt(0),
+      mCurrentFrameType(0),
+      mCurrentColor(&mWhiteYUV)
 #endif  // EFCD_ROTATE_FRAME
 {
-    // Makes the image with the original exposure compensation darker.
-    // So the effects of changing the exposure compensation can be seen.
-    mBlackYUV.Y = mBlackYUV.Y / 2;
-    mWhiteYUV.Y = mWhiteYUV.Y / 2;
-    mRedYUV.Y = mRedYUV.Y / 2;
-    mGreenYUV.Y = mGreenYUV.Y / 2;
-    mBlueYUV.Y = mBlueYUV.Y / 2;
+  // Makes the image with the original exposure compensation darker,
+  // so the effects of changing the exposure compensation can be seen.
+  mBlackYUV.Y = mBlackYUV.Y / 2;
+  mWhiteYUV.Y = mWhiteYUV.Y / 2;
+  mRedYUV.Y = mRedYUV.Y / 2;
+  mGreenYUV.Y = mGreenYUV.Y / 2;
+  mBlueYUV.Y = mBlueYUV.Y / 2;
 }
 
-EmulatedFakeCameraDevice::~EmulatedFakeCameraDevice()
-{
-}
+EmulatedFakeCameraDevice::~EmulatedFakeCameraDevice() {}
 
 /****************************************************************************
  * Emulated camera device abstract interface implementation.
  ***************************************************************************/
 
-status_t EmulatedFakeCameraDevice::connectDevice()
-{
-    ALOGV("%s", __FUNCTION__);
+status_t EmulatedFakeCameraDevice::connectDevice() {
+  ALOGV("%s", __FUNCTION__);
 
-    Mutex::Autolock locker(&mObjectLock);
-    if (!isInitialized()) {
-        ALOGE("%s: Fake camera device is not initialized.", __FUNCTION__);
-        return EINVAL;
-    }
-    if (isConnected()) {
-        ALOGW("%s: Fake camera device is already connected.", __FUNCTION__);
-        return NO_ERROR;
-    }
-
-    /* There is no device to connect to. */
-    mState = ECDS_CONNECTED;
-
+  Mutex::Autolock locker(&mObjectLock);
+  if (!isInitialized()) {
+    ALOGE("%s: Fake camera device is not initialized.", __FUNCTION__);
+    return EINVAL;
+  }
+  if (isConnected()) {
+    ALOGW("%s: Fake camera device is already connected.", __FUNCTION__);
     return NO_ERROR;
+  }
+
+  /* There is no device to connect to. */
+  mState = ECDS_CONNECTED;
+
+  return NO_ERROR;
 }
 
-status_t EmulatedFakeCameraDevice::disconnectDevice()
-{
-    ALOGV("%s", __FUNCTION__);
+status_t EmulatedFakeCameraDevice::disconnectDevice() {
+  ALOGV("%s", __FUNCTION__);
 
-    Mutex::Autolock locker(&mObjectLock);
-    if (!isConnected()) {
-        ALOGW("%s: Fake camera device is already disconnected.", __FUNCTION__);
-        return NO_ERROR;
-    }
-    if (isStarted()) {
-        ALOGE("%s: Cannot disconnect from the started device.", __FUNCTION__);
-        return EINVAL;
-    }
-
-    /* There is no device to disconnect from. */
-    mState = ECDS_INITIALIZED;
-
+  Mutex::Autolock locker(&mObjectLock);
+  if (!isConnected()) {
+    ALOGW("%s: Fake camera device is already disconnected.", __FUNCTION__);
     return NO_ERROR;
+  }
+  if (isStarted()) {
+    ALOGE("%s: Cannot disconnect from the started device.", __FUNCTION__);
+    return EINVAL;
+  }
+
+  /* There is no device to disconnect from. */
+  mState = ECDS_INITIALIZED;
+
+  return NO_ERROR;
 }
 
-status_t EmulatedFakeCameraDevice::startDevice(int width,
-                                               int height,
-                                               uint32_t pix_fmt,
-                                               int fps)
-{
-    ALOGV("%s", __FUNCTION__);
+status_t EmulatedFakeCameraDevice::startDevice(int width, int height,
+                                               uint32_t pix_fmt, int fps) {
+  ALOGV("%s", __FUNCTION__);
 
-    Mutex::Autolock locker(&mObjectLock);
-    if (!isConnected()) {
-        ALOGE("%s: Fake camera device is not connected.", __FUNCTION__);
+  Mutex::Autolock locker(&mObjectLock);
+  if (!isConnected()) {
+    ALOGE("%s: Fake camera device is not connected.", __FUNCTION__);
+    return EINVAL;
+  }
+  if (isStarted()) {
+    ALOGE("%s: Fake camera device is already started.", __FUNCTION__);
+    return EINVAL;
+  }
+
+  /* Initialize the base class. */
+  const status_t res =
+      EmulatedCameraDevice::commonStartDevice(width, height, pix_fmt, fps);
+  if (res == NO_ERROR) {
+    /* Calculate U/V panes inside the framebuffer. */
+    switch (mPixelFormat) {
+      case V4L2_PIX_FMT_YVU420:
+        mFrameV = mCurrentFrame + mTotalPixels;
+        mFrameU = mFrameV + mTotalPixels / 4;
+        mUVStep = 1;
+        mUVTotalNum = mTotalPixels / 4;
+        break;
+
+      case V4L2_PIX_FMT_YUV420:
+        mFrameU = mCurrentFrame + mTotalPixels;
+        mFrameV = mFrameU + mTotalPixels / 4;
+        mUVStep = 1;
+        mUVTotalNum = mTotalPixels / 4;
+        break;
+
+      case V4L2_PIX_FMT_NV21:
+        /* Interleaved UV pane, V first. */
+        mFrameV = mCurrentFrame + mTotalPixels;
+        mFrameU = mFrameV + 1;
+        mUVStep = 2;
+        mUVTotalNum = mTotalPixels / 4;
+        break;
+
+      case V4L2_PIX_FMT_NV12:
+        /* Interleaved UV pane, U first. */
+        mFrameU = mCurrentFrame + mTotalPixels;
+        mFrameV = mFrameU + 1;
+        mUVStep = 2;
+        mUVTotalNum = mTotalPixels / 4;
+        break;
+
+      default:
+        ALOGE("%s: Unknown pixel format %.4s", __FUNCTION__,
+              reinterpret_cast<const char*>(&mPixelFormat));
         return EINVAL;
     }
-    if (isStarted()) {
-        ALOGE("%s: Fake camera device is already started.", __FUNCTION__);
-        return EINVAL;
-    }
+    /* Number of items in a single row inside U/V panes. */
+    mUVInRow = (width / 2) * mUVStep;
+    mState = ECDS_STARTED;
+    mCurFrameTimestamp = 0;
+  } else {
+    ALOGE("%s: commonStartDevice failed", __FUNCTION__);
+  }
 
-    /* Initialize the base class. */
-    const status_t res =
-        EmulatedCameraDevice::commonStartDevice(width, height, pix_fmt, fps);
-    if (res == NO_ERROR) {
-        /* Calculate U/V panes inside the framebuffer. */
-        switch (mPixelFormat) {
-            case V4L2_PIX_FMT_YVU420:
-                mFrameV = mCurrentFrame + mTotalPixels;
-                mFrameU = mFrameU + mTotalPixels / 4;
-                mUVStep = 1;
-                mUVTotalNum = mTotalPixels / 4;
-                break;
-
-            case V4L2_PIX_FMT_YUV420:
-                mFrameU = mCurrentFrame + mTotalPixels;
-                mFrameV = mFrameU + mTotalPixels / 4;
-                mUVStep = 1;
-                mUVTotalNum = mTotalPixels / 4;
-                break;
-
-            case V4L2_PIX_FMT_NV21:
-                /* Interleaved UV pane, V first. */
-                mFrameV = mCurrentFrame + mTotalPixels;
-                mFrameU = mFrameV + 1;
-                mUVStep = 2;
-                mUVTotalNum = mTotalPixels / 4;
-                break;
-
-            case V4L2_PIX_FMT_NV12:
-                /* Interleaved UV pane, U first. */
-                mFrameU = mCurrentFrame + mTotalPixels;
-                mFrameV = mFrameU + 1;
-                mUVStep = 2;
-                mUVTotalNum = mTotalPixels / 4;
-                break;
-
-            default:
-                ALOGE("%s: Unknown pixel format %.4s", __FUNCTION__,
-                     reinterpret_cast<const char*>(&mPixelFormat));
-                return EINVAL;
-        }
-        /* Number of items in a single row inside U/V panes. */
-        mUVInRow = (width / 2) * mUVStep;
-        mState = ECDS_STARTED;
-        mCurFrameTimestamp = 0;
-    } else {
-        ALOGE("%s: commonStartDevice failed", __FUNCTION__);
-    }
-
-    return res;
+  return res;
 }
 
-status_t EmulatedFakeCameraDevice::stopDevice()
-{
-    ALOGV("%s", __FUNCTION__);
+status_t EmulatedFakeCameraDevice::stopDevice() {
+  ALOGV("%s", __FUNCTION__);
 
-    Mutex::Autolock locker(&mObjectLock);
-    if (!isStarted()) {
-        ALOGW("%s: Fake camera device is not started.", __FUNCTION__);
-        return NO_ERROR;
-    }
-
-    mFrameU = mFrameV = NULL;
-    EmulatedCameraDevice::commonStopDevice();
-    mState = ECDS_CONNECTED;
-
+  Mutex::Autolock locker(&mObjectLock);
+  if (!isStarted()) {
+    ALOGW("%s: Fake camera device is not started.", __FUNCTION__);
     return NO_ERROR;
+  }
+
+  mFrameU = mFrameV = NULL;
+  EmulatedCameraDevice::commonStopDevice();
+  mState = ECDS_CONNECTED;
+
+  return NO_ERROR;
 }
 
 /****************************************************************************
  * Worker thread management overrides.
  ***************************************************************************/
 
-bool EmulatedFakeCameraDevice::inWorkerThread()
-{
-    /* Wait till FPS timeout expires, or thread exit message is received. */
-    WorkerThread::SelectRes res =
-        getWorkerThread()->Select(-1, 1000000 / (mTargetFps / 1000));
-    if (res == WorkerThread::EXIT_THREAD) {
-        ALOGV("%s: Worker thread has been terminated.", __FUNCTION__);
-        return false;
-    }
+bool EmulatedFakeCameraDevice::inWorkerThread() {
+  /* Wait till FPS timeout expires, or thread exit message is received. */
+  WorkerThread::SelectRes res =
+      getWorkerThread()->Select(-1, 1000000 / (mTargetFps / 1000));
+  if (res == WorkerThread::EXIT_THREAD) {
+    ALOGV("%s: Worker thread has been terminated.", __FUNCTION__);
+    return false;
+  }
 
-    /* Lets see if we need to generate a new frame. */
-    if ((systemTime(SYSTEM_TIME_MONOTONIC) - mLastRedrawn) >= mRedrawAfter) {
-        /*
-         * Time to generate a new frame.
-         */
+  /* Let's see if we need to generate a new frame. */
+  if ((systemTime(SYSTEM_TIME_MONOTONIC) - mLastRedrawn) >= mRedrawAfter) {
+    /*
+     * Time to generate a new frame.
+     */
 
 #if EFCD_ROTATE_FRAME
-        const int frame_type = rotateFrame();
-        switch (frame_type) {
-            case 0:
-                drawCheckerboard();
-                break;
-            case 1:
-                drawStripes();
-                break;
-            case 2:
-                drawSolid(mCurrentColor);
-                break;
-        }
-#else
-        /* Draw the checker board. */
+    const int frame_type = rotateFrame();
+    switch (frame_type) {
+      case 0:
         drawCheckerboard();
+        break;
+      case 1:
+        drawStripes();
+        break;
+      case 2:
+        drawSolid(mCurrentColor);
+        break;
+    }
+#else
+    /* Draw the checker board. */
+    drawCheckerboard();
 
 #endif  // EFCD_ROTATE_FRAME
 
-        // mCurFrameTimestamp = systemTime(SYSTEM_TIME_MONOTONIC);
-        mCurFrameTimestamp += (1000000000 / (mTargetFps / 1000));
-        /* Timestamp the current frame, and notify the camera HAL about new frame. */
-        mLastRedrawn = systemTime(SYSTEM_TIME_MONOTONIC);
-        mCameraHAL->onNextFrameAvailable(mCurrentFrame, mCurFrameTimestamp, this);
-    }
+    // mCurFrameTimestamp = systemTime(SYSTEM_TIME_MONOTONIC);
+    mCurFrameTimestamp += (1000000000 / (mTargetFps / 1000));
+    /* Timestamp the current frame, and notify the camera HAL about the
+     * new frame. */
+    mLastRedrawn = systemTime(SYSTEM_TIME_MONOTONIC);
+    mCameraHAL->onNextFrameAvailable(mCurrentFrame, mCurFrameTimestamp, this);
+  }
 
-
-    return true;
+  return true;
 }
 
 /****************************************************************************
  * Fake camera device private API
  ***************************************************************************/
 
-void EmulatedFakeCameraDevice::drawCheckerboard()
-{
-    const int size = mFrameWidth / 10;
-    bool black = true;
+void EmulatedFakeCameraDevice::drawCheckerboard() {
+  const int size = mFrameWidth / 10;
+  bool black = true;
 
-    if (size == 0) {
-        // When this happens, it happens at a very high rate,
-        //     so don't log any messages and just return.
-        return;
+  if (size == 0) {
+    // When this happens, it happens at a very high rate,
+    // so don't log any messages and just return.
+    return;
+  }
+
+  if ((mCheckX / size) & 1) black = false;
+  if ((mCheckY / size) & 1) black = !black;
+
+  int county = mCheckY % size;
+  int checkxremainder = mCheckX % size;
+  uint8_t* Y = mCurrentFrame;
+  uint8_t* U_pos = mFrameU;
+  uint8_t* V_pos = mFrameV;
+  uint8_t* U = U_pos;
+  uint8_t* V = V_pos;
+
+  YUVPixel adjustedWhite = YUVPixel(mWhiteYUV);
+  changeWhiteBalance(adjustedWhite.Y, adjustedWhite.U, adjustedWhite.V);
+
+  for (int y = 0; y < mFrameHeight; y++) {
+    int countx = checkxremainder;
+    bool current = black;
+    for (int x = 0; x < mFrameWidth; x += 2) {
+      if (current) {
+        mBlackYUV.get(Y, U, V);
+      } else {
+        adjustedWhite.get(Y, U, V);
+      }
+      *Y = changeExposure(*Y);
+      Y[1] = *Y;
+      Y += 2;
+      U += mUVStep;
+      V += mUVStep;
+      countx += 2;
+      if (countx >= size) {
+        countx = 0;
+        current = !current;
+      }
     }
-
-
-    if((mCheckX / size) & 1)
-        black = false;
-    if((mCheckY / size) & 1)
-        black = !black;
-
-    int county = mCheckY % size;
-    int checkxremainder = mCheckX % size;
-    uint8_t* Y = mCurrentFrame;
-    uint8_t* U_pos = mFrameU;
-    uint8_t* V_pos = mFrameV;
-    uint8_t* U = U_pos;
-    uint8_t* V = V_pos;
-
-    YUVPixel adjustedWhite = YUVPixel(mWhiteYUV);
-    changeWhiteBalance(adjustedWhite.Y, adjustedWhite.U, adjustedWhite.V);
-
-    for(int y = 0; y < mFrameHeight; y++) {
-        int countx = checkxremainder;
-        bool current = black;
-        for(int x = 0; x < mFrameWidth; x += 2) {
-            if (current) {
-                mBlackYUV.get(Y, U, V);
-            } else {
-                adjustedWhite.get(Y, U, V);
-            }
-            *Y = changeExposure(*Y);
-            Y[1] = *Y;
-            Y += 2; U += mUVStep; V += mUVStep;
-            countx += 2;
-            if(countx >= size) {
-                countx = 0;
-                current = !current;
-            }
-        }
-        if (y & 0x1) {
-            U_pos = U;
-            V_pos = V;
-        } else {
-            U = U_pos;
-            V = V_pos;
-        }
-        if(county++ >= size) {
-            county = 0;
-            black = !black;
-        }
+    if (y & 0x1) {
+      U_pos = U;
+      V_pos = V;
+    } else {
+      U = U_pos;
+      V = V_pos;
     }
-    mCheckX += 3;
-    mCheckY++;
+    if (county++ >= size) {
+      county = 0;
+      black = !black;
+    }
+  }
+  mCheckX += 3;
+  mCheckY++;
 
-    /* Run the square. */
-    int sqx = ((mCcounter * 3) & 255);
-    if(sqx > 128) sqx = 255 - sqx;
-    int sqy = ((mCcounter * 5) & 255);
-    if(sqy > 128) sqy = 255 - sqy;
-    const int sqsize = mFrameWidth / 10;
-    drawSquare(sqx * sqsize / 32, sqy * sqsize / 32, (sqsize * 5) >> 1,
-               (mCcounter & 0x100) ? &mRedYUV : &mGreenYUV);
-    mCcounter++;
+  /* Run the square. */
+  int sqx = ((mCcounter * 3) & 255);
+  if (sqx > 128) sqx = 255 - sqx;
+  int sqy = ((mCcounter * 5) & 255);
+  if (sqy > 128) sqy = 255 - sqy;
+  const int sqsize = mFrameWidth / 10;
+  drawSquare(sqx * sqsize / 32, sqy * sqsize / 32, (sqsize * 5) >> 1,
+             (mCcounter & 0x100) ? &mRedYUV : &mGreenYUV);
+  mCcounter++;
 }
 
-void EmulatedFakeCameraDevice::drawSquare(int x,
-                                          int y,
-                                          int size,
-                                          const YUVPixel* color)
-{
-    const int square_xstop = min(mFrameWidth, x + size);
-    const int square_ystop = min(mFrameHeight, y + size);
-    uint8_t* Y_pos = mCurrentFrame + y * mFrameWidth + x;
+void EmulatedFakeCameraDevice::drawSquare(int x, int y, int size,
+                                          const YUVPixel* color) {
+  const int square_xstop = min(mFrameWidth, x + size);
+  const int square_ystop = min(mFrameHeight, y + size);
+  uint8_t* Y_pos = mCurrentFrame + y * mFrameWidth + x;
 
-    YUVPixel adjustedColor = *color;
-    changeWhiteBalance(adjustedColor.Y, adjustedColor.U, adjustedColor.V);
+  YUVPixel adjustedColor = *color;
+  changeWhiteBalance(adjustedColor.Y, adjustedColor.U, adjustedColor.V);
 
-    // Draw the square.
-    for (; y < square_ystop; y++) {
-        const int iUV = (y / 2) * mUVInRow + (x / 2) * mUVStep;
-        uint8_t* sqU = mFrameU + iUV;
-        uint8_t* sqV = mFrameV + iUV;
-        uint8_t* sqY = Y_pos;
-        for (int i = x; i < square_xstop; i += 2) {
-            adjustedColor.get(sqY, sqU, sqV);
-            *sqY = changeExposure(*sqY);
-            sqY[1] = *sqY;
-            sqY += 2; sqU += mUVStep; sqV += mUVStep;
-        }
-        Y_pos += mFrameWidth;
+  // Draw the square.
+  for (; y < square_ystop; y++) {
+    const int iUV = (y / 2) * mUVInRow + (x / 2) * mUVStep;
+    uint8_t* sqU = mFrameU + iUV;
+    uint8_t* sqV = mFrameV + iUV;
+    uint8_t* sqY = Y_pos;
+    for (int i = x; i < square_xstop; i += 2) {
+      adjustedColor.get(sqY, sqU, sqV);
+      *sqY = changeExposure(*sqY);
+      sqY[1] = *sqY;
+      sqY += 2;
+      sqU += mUVStep;
+      sqV += mUVStep;
     }
+    Y_pos += mFrameWidth;
+  }
 }
 
 #if EFCD_ROTATE_FRAME
 
-void EmulatedFakeCameraDevice::drawSolid(YUVPixel* color)
-{
-    YUVPixel adjustedColor = *color;
-    changeWhiteBalance(adjustedColor.Y, adjustedColor.U, adjustedColor.V);
+void EmulatedFakeCameraDevice::drawSolid(YUVPixel* color) {
+  YUVPixel adjustedColor = *color;
+  changeWhiteBalance(adjustedColor.Y, adjustedColor.U, adjustedColor.V);
 
-    /* All Ys are the same. */
-    memset(mCurrentFrame, changeExposure(adjustedColor.Y), mTotalPixels);
+  /* All Ys are the same. */
+  memset(mCurrentFrame, changeExposure(adjustedColor.Y), mTotalPixels);
 
+  /* Fill U, and V panes. */
+  uint8_t* U = mFrameU;
+  uint8_t* V = mFrameV;
+  for (int k = 0; k < mUVTotalNum; k++, U += mUVStep, V += mUVStep) {
+    *U = adjustedColor.U;
+    *V = adjustedColor.V;
+  }
+}
+
+void EmulatedFakeCameraDevice::drawStripes() {
+  /* Divide frame into 4 stripes. */
+  const int change_color_at = mFrameHeight / 4;
+  const int each_in_row = mUVInRow / mUVStep;
+  uint8_t* pY = mCurrentFrame;
+  for (int y = 0; y < mFrameHeight; y++, pY += mFrameWidth) {
+    /* Select the color. */
+    YUVPixel* color;
+    const int color_index = y / change_color_at;
+    if (color_index == 0) {
+      /* White stripe on top. */
+      color = &mWhiteYUV;
+    } else if (color_index == 1) {
+      /* Then the red stripe. */
+      color = &mRedYUV;
+    } else if (color_index == 2) {
+      /* Then the green stripe. */
+      color = &mGreenYUV;
+    } else {
+      /* And the blue stripe at the bottom. */
+      color = &mBlueYUV;
+    }
+    changeWhiteBalance(color->Y, color->U, color->V);
+
+    /* All Ys at the row are the same. */
+    memset(pY, changeExposure(color->Y), mFrameWidth);
+
+    /* Offset of the current row inside U/V panes. */
+    const int uv_off = (y / 2) * mUVInRow;
     /* Fill U, and V panes. */
-    uint8_t* U = mFrameU;
-    uint8_t* V = mFrameV;
-    for (int k = 0; k < mUVTotalNum; k++, U += mUVStep, V += mUVStep) {
-        *U = color->U;
-        *V = color->V;
+    uint8_t* U = mFrameU + uv_off;
+    uint8_t* V = mFrameV + uv_off;
+    for (int k = 0; k < each_in_row; k++, U += mUVStep, V += mUVStep) {
+      *U = color->U;
+      *V = color->V;
     }
+  }
 }
 
-void EmulatedFakeCameraDevice::drawStripes()
-{
-    /* Divide frame into 4 stripes. */
-    const int change_color_at = mFrameHeight / 4;
-    const int each_in_row = mUVInRow / mUVStep;
-    uint8_t* pY = mCurrentFrame;
-    for (int y = 0; y < mFrameHeight; y++, pY += mFrameWidth) {
-        /* Select the color. */
-        YUVPixel* color;
-        const int color_index = y / change_color_at;
-        if (color_index == 0) {
-            /* White stripe on top. */
-            color = &mWhiteYUV;
-        } else if (color_index == 1) {
-            /* Then the red stripe. */
-            color = &mRedYUV;
-        } else if (color_index == 2) {
-            /* Then the green stripe. */
-            color = &mGreenYUV;
-        } else {
-            /* And the blue stripe at the bottom. */
-            color = &mBlueYUV;
-        }
-        changeWhiteBalance(color->Y, color->U, color->V);
-
-        /* All Ys at the row are the same. */
-        memset(pY, changeExposure(color->Y), mFrameWidth);
-
-        /* Offset of the current row inside U/V panes. */
-        const int uv_off = (y / 2) * mUVInRow;
-        /* Fill U, and V panes. */
-        uint8_t* U = mFrameU + uv_off;
-        uint8_t* V = mFrameV + uv_off;
-        for (int k = 0; k < each_in_row; k++, U += mUVStep, V += mUVStep) {
-            *U = color->U;
-            *V = color->V;
-        }
+int EmulatedFakeCameraDevice::rotateFrame() {
+  if ((systemTime(SYSTEM_TIME_MONOTONIC) - mLastRotatedAt) >= mRotateFreq) {
+    mLastRotatedAt = systemTime(SYSTEM_TIME_MONOTONIC);
+    mCurrentFrameType++;
+    if (mCurrentFrameType > 2) {
+      mCurrentFrameType = 0;
     }
-}
-
-int EmulatedFakeCameraDevice::rotateFrame()
-{
-    if ((systemTime(SYSTEM_TIME_MONOTONIC) - mLastRotatedAt) >= mRotateFreq) {
-        mLastRotatedAt = systemTime(SYSTEM_TIME_MONOTONIC);
-        mCurrentFrameType++;
-        if (mCurrentFrameType > 2) {
-            mCurrentFrameType = 0;
-        }
-        if (mCurrentFrameType == 2) {
-            ALOGD("********** Rotated to the SOLID COLOR frame **********");
-            /* Solid color: lets rotate color too. */
-            if (mCurrentColor == &mWhiteYUV) {
-                ALOGD("----- Painting a solid RED frame -----");
-                mCurrentColor = &mRedYUV;
-            } else if (mCurrentColor == &mRedYUV) {
-                ALOGD("----- Painting a solid GREEN frame -----");
-                mCurrentColor = &mGreenYUV;
-            } else if (mCurrentColor == &mGreenYUV) {
-                ALOGD("----- Painting a solid BLUE frame -----");
-                mCurrentColor = &mBlueYUV;
-            } else {
-                /* Back to white. */
-                ALOGD("----- Painting a solid WHITE frame -----");
-                mCurrentColor = &mWhiteYUV;
-            }
-        } else if (mCurrentFrameType == 0) {
-            ALOGD("********** Rotated to the CHECKERBOARD frame **********");
-        } else if (mCurrentFrameType == 1) {
-            ALOGD("********** Rotated to the STRIPED frame **********");
-        }
+    if (mCurrentFrameType == 2) {
+      ALOGD("********** Rotated to the SOLID COLOR frame **********");
+      /* Solid color: let's rotate the color too. */
+      if (mCurrentColor == &mWhiteYUV) {
+        ALOGD("----- Painting a solid RED frame -----");
+        mCurrentColor = &mRedYUV;
+      } else if (mCurrentColor == &mRedYUV) {
+        ALOGD("----- Painting a solid GREEN frame -----");
+        mCurrentColor = &mGreenYUV;
+      } else if (mCurrentColor == &mGreenYUV) {
+        ALOGD("----- Painting a solid BLUE frame -----");
+        mCurrentColor = &mBlueYUV;
+      } else {
+        /* Back to white. */
+        ALOGD("----- Painting a solid WHITE frame -----");
+        mCurrentColor = &mWhiteYUV;
+      }
+    } else if (mCurrentFrameType == 0) {
+      ALOGD("********** Rotated to the CHECKERBOARD frame **********");
+    } else if (mCurrentFrameType == 1) {
+      ALOGD("********** Rotated to the STRIPED frame **********");
     }
+  }
 
-    return mCurrentFrameType;
+  return mCurrentFrameType;
 }
 
 #endif  // EFCD_ROTATE_FRAME
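
The pixel-format switch in startDevice() above is the only place the U/V plane layout is derived. The same arithmetic is shown below as a standalone sketch, assuming the V4L2 fourcc constants from <linux/videodev2.h>; computeUVPlanes() is an illustrative helper, not part of this HAL. Note that for YVU420 the U plane follows the V plane.

#include <linux/videodev2.h>
#include <stdint.h>

// Computes the U/V plane start pointers and the step between successive
// chroma samples for the four YUV 4:2:0 layouts handled above. 'frame'
// points at the start of the Y plane; returns false on unknown formats.
bool computeUVPlanes(uint8_t* frame, int width, int height, uint32_t pixFmt,
                     uint8_t** u, uint8_t** v, int* uvStep) {
  const int totalPixels = width * height;
  switch (pixFmt) {
    case V4L2_PIX_FMT_YUV420:  // planar, U plane then V plane
      *u = frame + totalPixels;
      *v = *u + totalPixels / 4;
      *uvStep = 1;
      return true;
    case V4L2_PIX_FMT_YVU420:  // planar, V plane then U plane
      *v = frame + totalPixels;
      *u = *v + totalPixels / 4;
      *uvStep = 1;
      return true;
    case V4L2_PIX_FMT_NV21:  // interleaved chroma plane, V first
      *v = frame + totalPixels;
      *u = *v + 1;
      *uvStep = 2;
      return true;
    case V4L2_PIX_FMT_NV12:  // interleaved chroma plane, U first
      *u = frame + totalPixels;
      *v = *u + 1;
      *uvStep = 2;
      return true;
    default:
      return false;
  }
}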
diff --git a/guest/hals/camera/EmulatedFakeCameraDevice.h b/guest/hals/camera/EmulatedFakeCameraDevice.h
index a02d301..3f48a78 100644
--- a/guest/hals/camera/EmulatedFakeCameraDevice.h
+++ b/guest/hals/camera/EmulatedFakeCameraDevice.h
@@ -25,14 +25,14 @@
 #include "Converters.h"
 #include "EmulatedCameraDevice.h"
 
-/* This is used for debugging format / conversion issues. If EFCD_ROTATE_FRAME is
- * set to 0, the frame content will be always the "checkerboard". Otherwise, if
- * EFCD_ROTATE_FRAME is set to a non-zero value, the frame content will "rotate"
- * from a "checkerboard" frame to a "white/red/green/blue stripes" frame, to a
- * "white/red/green/blue" frame. Frame content rotation helps finding bugs in
- * format conversions.
+/* This is used for debugging format / conversion issues. If EFCD_ROTATE_FRAME
+ * is set to 0, the frame content will always be the "checkerboard". Otherwise,
+ * if EFCD_ROTATE_FRAME is set to a non-zero value, the frame content will
+ * "rotate" from a "checkerboard" frame to a "white/red/green/blue stripes"
+ * frame, to a "white/red/green/blue" frame. Frame content rotation helps
+ * finding bugs in format conversions.
  */
-#define EFCD_ROTATE_FRAME   0
+#define EFCD_ROTATE_FRAME 0
 
 namespace android {
 
@@ -45,151 +45,148 @@
  * its color when bouncing off the 0,0 corner.
  */
 class EmulatedFakeCameraDevice : public EmulatedCameraDevice {
-public:
-    /* Constructs EmulatedFakeCameraDevice instance. */
-    explicit EmulatedFakeCameraDevice(EmulatedFakeCamera* camera_hal);
+ public:
+  /* Constructs EmulatedFakeCameraDevice instance. */
+  explicit EmulatedFakeCameraDevice(EmulatedFakeCamera* camera_hal);
 
-    /* Destructs EmulatedFakeCameraDevice instance. */
-    ~EmulatedFakeCameraDevice();
+  /* Destructs EmulatedFakeCameraDevice instance. */
+  ~EmulatedFakeCameraDevice();
 
-    /***************************************************************************
-     * Emulated camera device abstract interface implementation.
-     * See declarations of these methods in EmulatedCameraDevice class for
-     * information on each of these methods.
-     **************************************************************************/
+  /***************************************************************************
+   * Emulated camera device abstract interface implementation.
+   * See declarations of these methods in EmulatedCameraDevice class for
+   * information on each of these methods.
+   **************************************************************************/
 
-public:
-    /* Connects to the camera device.
-     * Since there is no real device to connect to, this method does nothing,
-     * but changes the state.
-     */
-    status_t connectDevice();
+ public:
+  /* Connects to the camera device.
+   * Since there is no real device to connect to, this method does nothing,
+   * but changes the state.
+   */
+  status_t connectDevice();
 
-    /* Disconnects from the camera device.
-     * Since there is no real device to disconnect from, this method does
-     * nothing, but changes the state.
-     */
-    status_t disconnectDevice();
+  /* Disconnects from the camera device.
+   * Since there is no real device to disconnect from, this method does
+   * nothing, but changes the state.
+   */
+  status_t disconnectDevice();
 
-    /* Starts the camera device. */
-    status_t startDevice(int width, int height, uint32_t pix_fmt, int fps);
+  /* Starts the camera device. */
+  status_t startDevice(int width, int height, uint32_t pix_fmt, int fps);
 
-    /* Stops the camera device. */
-    status_t stopDevice();
+  /* Stops the camera device. */
+  status_t stopDevice();
 
-    /* Gets current preview fame into provided buffer. */
-    status_t getPreviewFrame(void* buffer) {
-      return OK;
-    }
+  /* Gets the current preview frame into the provided buffer. */
+  status_t getPreviewFrame(void* buffer) { return OK; }
 
-    /***************************************************************************
-     * Worker thread management overrides.
-     * See declarations of these methods in EmulatedCameraDevice class for
-     * information on each of these methods.
-     **************************************************************************/
+  /***************************************************************************
+   * Worker thread management overrides.
+   * See declarations of these methods in EmulatedCameraDevice class for
+   * information on each of these methods.
+   **************************************************************************/
 
-protected:
-    /* Implementation of the worker thread routine.
-     * This method simply sleeps for a period of time defined by the FPS property
-     * of the fake camera (simulating frame frequency), and then calls emulated
-     * camera's onNextFrameAvailable method.
-     */
-    bool inWorkerThread();
+ protected:
+  /* Implementation of the worker thread routine.
+   * This method simply sleeps for a period of time defined by the FPS property
+   * of the fake camera (simulating frame frequency), and then calls emulated
+   * camera's onNextFrameAvailable method.
+   */
+  bool inWorkerThread();
 
-    /****************************************************************************
-     * Fake camera device private API
-     ***************************************************************************/
+  /****************************************************************************
+   * Fake camera device private API
+   ***************************************************************************/
 
-private:
+ private:
+  /* Draws a black and white checker board in the current frame buffer. */
+  void drawCheckerboard();
 
-    /* Draws a black and white checker board in the current frame buffer. */
-    void drawCheckerboard();
-
-    /* Draws a square of the given color in the current frame buffer.
-     * Param:
-     *  x, y - Coordinates of the top left corner of the square in the buffer.
-     *  size - Size of the square's side.
-     *  color - Square's color.
-     */
-    void drawSquare(int x, int y, int size, const YUVPixel* color);
+  /* Draws a square of the given color in the current frame buffer.
+   * Param:
+   *  x, y - Coordinates of the top left corner of the square in the buffer.
+   *  size - Size of the square's side.
+   *  color - Square's color.
+   */
+  void drawSquare(int x, int y, int size, const YUVPixel* color);
 
 #if EFCD_ROTATE_FRAME
-    void drawSolid(YUVPixel* color);
-    void drawStripes();
-    int rotateFrame();
+  void drawSolid(YUVPixel* color);
+  void drawStripes();
+  int rotateFrame();
 #endif  // EFCD_ROTATE_FRAME
 
-    /****************************************************************************
-     * Fake camera device data members
-     ***************************************************************************/
+  /****************************************************************************
+   * Fake camera device data members
+   ***************************************************************************/
 
-private:
-    /*
-     * Pixel colors in YUV format used when drawing the checker board.
-     */
+ private:
+  /*
+   * Pixel colors in YUV format used when drawing the checker board.
+   */
 
-    YUVPixel    mBlackYUV;
-    YUVPixel    mWhiteYUV;
-    YUVPixel    mRedYUV;
-    YUVPixel    mGreenYUV;
-    YUVPixel    mBlueYUV;
+  YUVPixel mBlackYUV;
+  YUVPixel mWhiteYUV;
+  YUVPixel mRedYUV;
+  YUVPixel mGreenYUV;
+  YUVPixel mBlueYUV;
 
-    /* Last time the frame has been redrawn. */
-    nsecs_t     mLastRedrawn;
+  /* Last time the frame was redrawn. */
+  nsecs_t mLastRedrawn;
 
-    /*
-     * Precalculated values related to U/V panes.
-     */
+  /*
+   * Precalculated values related to U/V panes.
+   */
 
-    /* U pane inside the framebuffer. */
-    uint8_t*    mFrameU;
+  /* U pane inside the framebuffer. */
+  uint8_t* mFrameU;
 
-    /* V pane inside the framebuffer. */
-    uint8_t*    mFrameV;
+  /* V pane inside the framebuffer. */
+  uint8_t* mFrameV;
 
-    /* Defines byte distance between adjacent U, and V values. */
-    int         mUVStep;
+  /* Defines byte distance between adjacent U and V values. */
+  int mUVStep;
 
-    /* Defines number of Us and Vs in a row inside the U/V panes.
-     * Note that if U/V panes are interleaved, this value reflects the total
-     * number of both, Us and Vs in a single row in the interleaved UV pane. */
-    int         mUVInRow;
+  /* Defines number of Us and Vs in a row inside the U/V panes.
+   * Note that if U/V panes are interleaved, this value reflects the total
+   * number of both Us and Vs in a single row in the interleaved UV pane. */
+  int mUVInRow;
 
-    /* Total number of each, U, and V elements in the framebuffer. */
-    int         mUVTotalNum;
+  /* Total number of U and V elements each in the framebuffer. */
+  int mUVTotalNum;
 
-    /*
-     * Checkerboard drawing related stuff
-     */
+  /*
+   * Checkerboard drawing related stuff
+   */
 
-    int         mCheckX;
-    int         mCheckY;
-    int         mCcounter;
+  int mCheckX;
+  int mCheckY;
+  int mCcounter;
 
-    /* Defines time (in nanoseconds) between redrawing the checker board.
-     * We will redraw the checker board every 15 milliseconds. */
-    static const nsecs_t    mRedrawAfter = 15000000LL;
+  /* Defines time (in nanoseconds) between redrawing the checker board.
+   * We will redraw the checker board every 15 milliseconds. */
+  static const nsecs_t mRedrawAfter = 15000000LL;
 
 #if EFCD_ROTATE_FRAME
-    /* Frame rotation frequency in nanosec (currently - 3 sec) */
-    static const nsecs_t    mRotateFreq = 3000000000LL;
+  /* Frame rotation frequency in nanoseconds (currently 3 sec). */
+  static const nsecs_t mRotateFreq = 3000000000LL;
 
-    /* Last time the frame has rotated. */
-    nsecs_t     mLastRotatedAt;
+  /* Last time the frame was rotated. */
+  nsecs_t mLastRotatedAt;
 
-    /* Type of the frame to display in the current rotation:
-     *  0 - Checkerboard.
-     *  1 - White/Red/Green/Blue horisontal stripes
-     *  2 - Solid color. */
-    int         mCurrentFrameType;
+  /* Type of the frame to display in the current rotation:
+   *  0 - Checkerboard.
+   *  1 - White/Red/Green/Blue horizontal stripes
+   *  2 - Solid color. */
+  int mCurrentFrameType;
 
-    /* Color to use to paint the solid color frame. Colors will rotate between
-     * white, red, gree, and blue each time rotation comes to the solid color
-     * frame. */
-    YUVPixel*   mCurrentColor;
+  /* Color to use to paint the solid color frame. Colors will rotate between
+   * white, red, green, and blue each time rotation comes to the solid color
+   * frame. */
+  YUVPixel* mCurrentColor;
 #endif  // EFCD_ROTATE_FRAME
 };
 
 }; /* namespace android */
 
-#endif  /* HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA_DEVICE_H */
+#endif /* HW_EMULATOR_CAMERA_EMULATED_FAKE_CAMERA_DEVICE_H */
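The 15 ms redraw gate declared above (mRedrawAfter together with mLastRedrawn)
is a plain elapsed-time throttle. The following standalone sketch shows the
pattern; nsecs_t, nowNs(), and RedrawThrottle are illustrative stand-ins, not
HAL code.

#include <chrono>
#include <cstdint>

using nsecs_t = int64_t;  // stand-in for the Android typedef

static nsecs_t nowNs() {
  using namespace std::chrono;
  return duration_cast<nanoseconds>(steady_clock::now().time_since_epoch())
      .count();
}

struct RedrawThrottle {
  static constexpr nsecs_t kRedrawAfter = 15000000LL;  // 15 ms, as above
  nsecs_t mLastRedrawn = 0;

  // True (and the timestamp recorded) only when the redraw period elapsed.
  bool shouldRedraw() {
    const nsecs_t now = nowNs();
    if (now - mLastRedrawn < kRedrawAfter) return false;
    mLastRedrawn = now;
    return true;
  }
};

int main() {
  RedrawThrottle t;
  return t.shouldRedraw() ? 0 : 1;  // first call always redraws
}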
diff --git a/guest/hals/camera/EmulatedQemuCamera.cpp b/guest/hals/camera/EmulatedQemuCamera.cpp
index af1e324..cd8f670 100644
--- a/guest/hals/camera/EmulatedQemuCamera.cpp
+++ b/guest/hals/camera/EmulatedQemuCamera.cpp
@@ -21,21 +21,16 @@
 
 #define LOG_NDEBUG 0
 #define LOG_TAG "EmulatedCamera_QemuCamera"
-#include <cutils/log.h>
 #include "EmulatedQemuCamera.h"
+#include <cutils/log.h>
 #include "EmulatedCameraFactory.h"
 
 namespace android {
 
 EmulatedQemuCamera::EmulatedQemuCamera(int cameraId, struct hw_module_t* module)
-        : EmulatedCamera(cameraId, module),
-          mQemuCameraDevice(this)
-{
-}
+    : EmulatedCamera(cameraId, module), mQemuCameraDevice(this) {}
 
-EmulatedQemuCamera::~EmulatedQemuCamera()
-{
-}
+EmulatedQemuCamera::~EmulatedQemuCamera() {}
 
 /****************************************************************************
  * EmulatedCamera virtual overrides.
@@ -43,77 +38,74 @@
 
 status_t EmulatedQemuCamera::Initialize(const char* device_name,
                                         const char* frame_dims,
-                                        const char* facing_dir)
-{
-    ALOGV("%s:\n   Name=%s\n   Facing '%s'\n   Dimensions=%s",
-         __FUNCTION__, device_name, facing_dir, frame_dims);
-    /* Save dimensions. */
-    mFrameDims = frame_dims;
+                                        const char* facing_dir) {
+  ALOGV("%s:\n   Name=%s\n   Facing '%s'\n   Dimensions=%s", __FUNCTION__,
+        device_name, facing_dir, frame_dims);
+  /* Save dimensions. */
+  mFrameDims = frame_dims;
 
-    /* Initialize camera device. */
-    status_t res = mQemuCameraDevice.Initialize(device_name);
-    if (res != NO_ERROR) {
-        return res;
-    }
+  /* Initialize camera device. */
+  status_t res = mQemuCameraDevice.Initialize(device_name);
+  if (res != NO_ERROR) {
+    return res;
+  }
 
-    /* Initialize base class. */
-    res = EmulatedCamera::Initialize();
-    if (res != NO_ERROR) {
-        return res;
-    }
+  /* Initialize base class. */
+  res = EmulatedCamera::Initialize();
+  if (res != NO_ERROR) {
+    return res;
+  }
 
-    /*
-     * Set customizable parameters.
-     */
+  /*
+   * Set customizable parameters.
+   */
 
-    mParameters.set(EmulatedCamera::FACING_KEY, facing_dir);
-    mParameters.set(EmulatedCamera::ORIENTATION_KEY,
-                    gEmulatedCameraFactory.getQemuCameraOrientation());
-    mParameters.set(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES, frame_dims);
-    mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES, frame_dims);
+  mParameters.set(EmulatedCamera::FACING_KEY, facing_dir);
+  mParameters.set(EmulatedCamera::ORIENTATION_KEY,
+                  gEmulatedCameraFactory.getQemuCameraOrientation());
+  mParameters.set(CameraParameters::KEY_SUPPORTED_PICTURE_SIZES, frame_dims);
+  mParameters.set(CameraParameters::KEY_SUPPORTED_PREVIEW_SIZES, frame_dims);
 
-    /*
-     * Use first dimension reported by the device to set current preview and
-     * picture sizes.
-     */
+  /*
+   * Use first dimension reported by the device to set current preview and
+   * picture sizes.
+   */
 
-    char first_dim[128];
-    /* Dimensions are separated with ',' */
-    const char* c = strchr(frame_dims, ',');
-    if (c == NULL) {
-        strncpy(first_dim, frame_dims, sizeof(first_dim));
-        first_dim[sizeof(first_dim)-1] = '\0';
-    } else if (static_cast<size_t>(c - frame_dims) < sizeof(first_dim)) {
-        memcpy(first_dim, frame_dims, c - frame_dims);
-        first_dim[c - frame_dims] = '\0';
-    } else {
-        memcpy(first_dim, frame_dims, sizeof(first_dim));
-        first_dim[sizeof(first_dim)-1] = '\0';
-    }
+  char first_dim[128];
+  /* Dimensions are separated with ',' */
+  const char* c = strchr(frame_dims, ',');
+  if (c == NULL) {
+    strncpy(first_dim, frame_dims, sizeof(first_dim));
+    first_dim[sizeof(first_dim) - 1] = '\0';
+  } else if (static_cast<size_t>(c - frame_dims) < sizeof(first_dim)) {
+    memcpy(first_dim, frame_dims, c - frame_dims);
+    first_dim[c - frame_dims] = '\0';
+  } else {
+    memcpy(first_dim, frame_dims, sizeof(first_dim));
+    first_dim[sizeof(first_dim) - 1] = '\0';
+  }
 
-    /* Width and height are separated with 'x' */
-    char* sep = strchr(first_dim, 'x');
-    if (sep == NULL) {
-        ALOGE("%s: Invalid first dimension format in %s",
-             __FUNCTION__, frame_dims);
-        return EINVAL;
-    }
+  /* Width and height are separated with 'x' */
+  char* sep = strchr(first_dim, 'x');
+  if (sep == NULL) {
+    ALOGE("%s: Invalid first dimension format in %s", __FUNCTION__, frame_dims);
+    return EINVAL;
+  }
 
-    *sep = '\0';
-    const int x = atoi(first_dim);
-    const int y = atoi(sep + 1);
-    mParameters.setPreviewSize(x, y);
-    mParameters.setPictureSize(x, y);
+  *sep = '\0';
+  const int x = atoi(first_dim);
+  const int y = atoi(sep + 1);
+  mParameters.setPreviewSize(x, y);
+  mParameters.setPictureSize(x, y);
 
-    ALOGV("%s: Qemu camera %s is initialized. Current frame is %dx%d",
-         __FUNCTION__, device_name, x, y);
+  ALOGV("%s: Qemu camera %s is initialized. Current frame is %dx%d",
+        __FUNCTION__, device_name, x, y);
 
-    return NO_ERROR;
+  return NO_ERROR;
 }
 
-EmulatedCameraDevice* EmulatedQemuCamera::getCameraDevice()
-{
-    return &mQemuCameraDevice;
+EmulatedCameraDevice* EmulatedQemuCamera::getCameraDevice() {
+  return &mQemuCameraDevice;
 }
 
-};  /* namespace android */
+}; /* namespace android */
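For reference, the dimension parsing in Initialize() above reduces to the
standalone sketch below: take everything before the first ',', then split on
'x'. parseFirstDim and the sample string are assumptions for illustration,
not code from this HAL.

#include <cstdio>
#include <cstdlib>
#include <cstring>

// Extract the first "WxH" pair from a comma-separated dimensions list.
static bool parseFirstDim(const char* frame_dims, int* w, int* h) {
  char first_dim[128];
  const char* c = strchr(frame_dims, ',');
  size_t len =
      (c == NULL) ? strlen(frame_dims) : static_cast<size_t>(c - frame_dims);
  if (len >= sizeof(first_dim)) len = sizeof(first_dim) - 1;  // truncate
  memcpy(first_dim, frame_dims, len);
  first_dim[len] = '\0';

  char* sep = strchr(first_dim, 'x');  // width and height separated by 'x'
  if (sep == NULL) return false;
  *sep = '\0';
  *w = atoi(first_dim);
  *h = atoi(sep + 1);
  return true;
}

int main() {
  int w = 0, h = 0;
  if (parseFirstDim("640x480,320x240", &w, &h)) printf("%dx%d\n", w, h);
}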
diff --git a/guest/hals/camera/EmulatedQemuCamera.h b/guest/hals/camera/EmulatedQemuCamera.h
index 1b826c7..fe063bf 100644
--- a/guest/hals/camera/EmulatedQemuCamera.h
+++ b/guest/hals/camera/EmulatedQemuCamera.h
@@ -30,44 +30,43 @@
 /* Encapsulates functionality of an emulated camera connected to the host.
  */
 class EmulatedQemuCamera : public EmulatedCamera {
-public:
-    /* Constructs EmulatedQemuCamera instance. */
-    EmulatedQemuCamera(int cameraId, struct hw_module_t* module);
+ public:
+  /* Constructs EmulatedQemuCamera instance. */
+  EmulatedQemuCamera(int cameraId, struct hw_module_t* module);
 
-    /* Destructs EmulatedQemuCamera instance. */
-    ~EmulatedQemuCamera();
+  /* Destructs EmulatedQemuCamera instance. */
+  ~EmulatedQemuCamera();
 
-    /***************************************************************************
-     * EmulatedCamera virtual overrides.
-     **************************************************************************/
+  /***************************************************************************
+   * EmulatedCamera virtual overrides.
+   **************************************************************************/
 
-public:
-    /* Initializes EmulatedQemuCamera instance. */
-     status_t Initialize(const char* device_name,
-                         const char* frame_dims,
-                         const char* facing_dir);
+ public:
+  /* Initializes EmulatedQemuCamera instance. */
+  status_t Initialize(const char* device_name, const char* frame_dims,
+                      const char* facing_dir);
 
-    /***************************************************************************
-     * EmulatedCamera abstract API implementation.
-     **************************************************************************/
+  /***************************************************************************
+   * EmulatedCamera abstract API implementation.
+   **************************************************************************/
 
-protected:
-    /* Gets emulated camera device ised by this instance of the emulated camera.
-     */
-    EmulatedCameraDevice* getCameraDevice();
+ protected:
+  /* Gets emulated camera device used by this instance of the emulated camera.
+   */
+  EmulatedCameraDevice* getCameraDevice();
 
-    /***************************************************************************
-     * Data memebers.
-     **************************************************************************/
+  /***************************************************************************
+   * Data members.
+   **************************************************************************/
 
-protected:
-    /* Contained qemu camera device object. */
-    EmulatedQemuCameraDevice    mQemuCameraDevice;
+ protected:
+  /* Contained qemu camera device object. */
+  EmulatedQemuCameraDevice mQemuCameraDevice;
 
-    /* Supported frame dimensions reported by the camera device. */
-    String8                     mFrameDims;
+  /* Supported frame dimensions reported by the camera device. */
+  String8 mFrameDims;
 };
 
 }; /* namespace android */
 
-#endif  /* HW_EMULATOR_CAMERA_EMULATED_QEMU_CAMERA_H */
+#endif /* HW_EMULATOR_CAMERA_EMULATED_QEMU_CAMERA_H */
diff --git a/guest/hals/camera/EmulatedQemuCamera2.cpp b/guest/hals/camera/EmulatedQemuCamera2.cpp
index 2c94f0e..4eb8818 100644
--- a/guest/hals/camera/EmulatedQemuCamera2.cpp
+++ b/guest/hals/camera/EmulatedQemuCamera2.cpp
@@ -22,34 +22,26 @@
 
 #define LOG_NDEBUG 0
 #define LOG_TAG "EmulatedCamera_QemuCamera2"
+#include "EmulatedQemuCamera2.h"
 #include <cutils/log.h>
 #include <cutils/properties.h>
-#include "EmulatedQemuCamera2.h"
 #include "EmulatedCameraFactory.h"
 
 namespace android {
 
-EmulatedQemuCamera2::EmulatedQemuCamera2(int cameraId,
-        bool facingBack,
-        struct hw_module_t* module)
-        : EmulatedCamera2(cameraId,module),
-          mFacingBack(facingBack)
-{
-    ALOGD("Constructing emulated qemu camera 2 facing %s",
-            facingBack ? "back" : "front");
+EmulatedQemuCamera2::EmulatedQemuCamera2(int cameraId, bool facingBack,
+                                         struct hw_module_t* module)
+    : EmulatedCamera2(cameraId, module), mFacingBack(facingBack) {
+  ALOGD("Constructing emulated qemu camera 2 facing %s",
+        facingBack ? "back" : "front");
 }
 
-EmulatedQemuCamera2::~EmulatedQemuCamera2()
-{
-}
+EmulatedQemuCamera2::~EmulatedQemuCamera2() {}
 
 /****************************************************************************
  * Public API overrides
  ***************************************************************************/
 
-status_t EmulatedQemuCamera2::Initialize()
-{
-    return NO_ERROR;
-}
+status_t EmulatedQemuCamera2::Initialize() { return NO_ERROR; }
 
-};  /* namespace android */
+}; /* namespace android */
diff --git a/guest/hals/camera/EmulatedQemuCamera2.h b/guest/hals/camera/EmulatedQemuCamera2.h
index 520ccce..7697c0e 100644
--- a/guest/hals/camera/EmulatedQemuCamera2.h
+++ b/guest/hals/camera/EmulatedQemuCamera2.h
@@ -27,40 +27,40 @@
 
 namespace android {
 
-/* Encapsulates functionality of an advanced fake camera based on real host camera data.
+/* Encapsulates functionality of an advanced fake camera based on real host
+ * camera data.
  */
 class EmulatedQemuCamera2 : public EmulatedCamera2 {
-public:
-    /* Constructs EmulatedFakeCamera instance. */
-    EmulatedQemuCamera2(int cameraId, bool facingBack, struct hw_module_t* module);
+ public:
+  /* Constructs EmulatedQemuCamera2 instance. */
+  EmulatedQemuCamera2(int cameraId, bool facingBack,
+                      struct hw_module_t* module);
 
-    /* Destructs EmulatedFakeCamera instance. */
-    ~EmulatedQemuCamera2();
+  /* Destructs EmulatedQemuCamera2 instance. */
+  ~EmulatedQemuCamera2();
 
-    /****************************************************************************
-     * EmulatedCamera2 virtual overrides.
-     ***************************************************************************/
+  /****************************************************************************
+   * EmulatedCamera2 virtual overrides.
+   ***************************************************************************/
 
-public:
-    /* Initializes EmulatedQemuCamera2 instance. */
-     status_t Initialize();
+ public:
+  /* Initializes EmulatedQemuCamera2 instance. */
+  status_t Initialize();
 
-    /****************************************************************************
-     * EmulatedCamera abstract API implementation.
-     ***************************************************************************/
+  /****************************************************************************
+   * EmulatedCamera abstract API implementation.
+   ***************************************************************************/
 
-protected:
+ protected:
+  /****************************************************************************
+   * Data members.
+   ***************************************************************************/
 
-    /****************************************************************************
-     * Data memebers.
-     ***************************************************************************/
-
-protected:
-    /* Facing back (true) or front (false) switch. */
-    bool                        mFacingBack;
-
+ protected:
+  /* Facing back (true) or front (false) switch. */
+  bool mFacingBack;
 };
 
 }; /* namespace android */
 
-#endif  /* HW_EMULATOR_CAMERA_EMULATED_QEMU_CAMERA2_H */
+#endif /* HW_EMULATOR_CAMERA_EMULATED_QEMU_CAMERA2_H */
diff --git a/guest/hals/camera/EmulatedQemuCameraDevice.cpp b/guest/hals/camera/EmulatedQemuCameraDevice.cpp
index ef5406b..0547010 100644
--- a/guest/hals/camera/EmulatedQemuCameraDevice.cpp
+++ b/guest/hals/camera/EmulatedQemuCameraDevice.cpp
@@ -21,247 +21,231 @@
 
 #define LOG_NDEBUG 0
 #define LOG_TAG "EmulatedCamera_QemuDevice"
+#include "EmulatedQemuCameraDevice.h"
 #include <cutils/log.h>
 #include "EmulatedQemuCamera.h"
-#include "EmulatedQemuCameraDevice.h"
 
 namespace android {
 
-EmulatedQemuCameraDevice::EmulatedQemuCameraDevice(EmulatedQemuCamera* camera_hal)
-    : EmulatedCameraDevice(camera_hal),
-      mQemuClient(),
-      mPreviewFrame(NULL)
-{
-}
+EmulatedQemuCameraDevice::EmulatedQemuCameraDevice(
+    EmulatedQemuCamera* camera_hal)
+    : EmulatedCameraDevice(camera_hal), mQemuClient(), mPreviewFrame(NULL) {}
 
-EmulatedQemuCameraDevice::~EmulatedQemuCameraDevice()
-{
-    if (mPreviewFrame != NULL) {
-        delete[] mPreviewFrame;
-    }
+EmulatedQemuCameraDevice::~EmulatedQemuCameraDevice() {
+  if (mPreviewFrame != NULL) {
+    delete[] mPreviewFrame;
+  }
 }
 
 /****************************************************************************
  * Public API
  ***************************************************************************/
 
-status_t EmulatedQemuCameraDevice::Initialize(const char* device_name)
-{
-    /* Connect to the service. */
-    char connect_str[256];
-    snprintf(connect_str, sizeof(connect_str), "name=%s", device_name);
-    status_t res = mQemuClient.connectClient(connect_str);
-    if (res != NO_ERROR) {
-        return res;
-    }
-
-    /* Initialize base class. */
-    res = EmulatedCameraDevice::Initialize();
-    if (res == NO_ERROR) {
-        ALOGV("%s: Connected to the emulated camera service '%s'",
-             __FUNCTION__, device_name);
-        mDeviceName = device_name;
-    } else {
-        mQemuClient.queryDisconnect();
-    }
-
+status_t EmulatedQemuCameraDevice::Initialize(const char* device_name) {
+  /* Connect to the service. */
+  char connect_str[256];
+  snprintf(connect_str, sizeof(connect_str), "name=%s", device_name);
+  status_t res = mQemuClient.connectClient(connect_str);
+  if (res != NO_ERROR) {
     return res;
+  }
+
+  /* Initialize base class. */
+  res = EmulatedCameraDevice::Initialize();
+  if (res == NO_ERROR) {
+    ALOGV("%s: Connected to the emulated camera service '%s'", __FUNCTION__,
+          device_name);
+    mDeviceName = device_name;
+  } else {
+    mQemuClient.queryDisconnect();
+  }
+
+  return res;
 }
 
 /****************************************************************************
  * Emulated camera device abstract interface implementation.
  ***************************************************************************/
 
-status_t EmulatedQemuCameraDevice::connectDevice()
-{
-    ALOGV("%s", __FUNCTION__);
+status_t EmulatedQemuCameraDevice::connectDevice() {
+  ALOGV("%s", __FUNCTION__);
 
-    Mutex::Autolock locker(&mObjectLock);
-    if (!isInitialized()) {
-        ALOGE("%s: Qemu camera device is not initialized.", __FUNCTION__);
-        return EINVAL;
-    }
-    if (isConnected()) {
-        ALOGW("%s: Qemu camera device '%s' is already connected.",
-             __FUNCTION__, (const char*)mDeviceName);
-        return NO_ERROR;
-    }
+  Mutex::Autolock locker(&mObjectLock);
+  if (!isInitialized()) {
+    ALOGE("%s: Qemu camera device is not initialized.", __FUNCTION__);
+    return EINVAL;
+  }
+  if (isConnected()) {
+    ALOGW("%s: Qemu camera device '%s' is already connected.", __FUNCTION__,
+          (const char*)mDeviceName);
+    return NO_ERROR;
+  }
 
-    /* Connect to the camera device via emulator. */
-    const status_t res = mQemuClient.queryConnect();
-    if (res == NO_ERROR) {
-        ALOGV("%s: Connected to device '%s'",
-             __FUNCTION__, (const char*)mDeviceName);
-        mState = ECDS_CONNECTED;
-    } else {
-        ALOGE("%s: Connection to device '%s' failed",
-             __FUNCTION__, (const char*)mDeviceName);
-    }
+  /* Connect to the camera device via emulator. */
+  const status_t res = mQemuClient.queryConnect();
+  if (res == NO_ERROR) {
+    ALOGV("%s: Connected to device '%s'", __FUNCTION__,
+          (const char*)mDeviceName);
+    mState = ECDS_CONNECTED;
+  } else {
+    ALOGE("%s: Connection to device '%s' failed", __FUNCTION__,
+          (const char*)mDeviceName);
+  }
 
-    return res;
+  return res;
 }
 
-status_t EmulatedQemuCameraDevice::disconnectDevice()
-{
-    ALOGV("%s", __FUNCTION__);
+status_t EmulatedQemuCameraDevice::disconnectDevice() {
+  ALOGV("%s", __FUNCTION__);
 
-    Mutex::Autolock locker(&mObjectLock);
-    if (!isConnected()) {
-        ALOGW("%s: Qemu camera device '%s' is already disconnected.",
-             __FUNCTION__, (const char*)mDeviceName);
-        return NO_ERROR;
-    }
-    if (isStarted()) {
-        ALOGE("%s: Cannot disconnect from the started device '%s.",
-             __FUNCTION__, (const char*)mDeviceName);
-        return EINVAL;
-    }
+  Mutex::Autolock locker(&mObjectLock);
+  if (!isConnected()) {
+    ALOGW("%s: Qemu camera device '%s' is already disconnected.", __FUNCTION__,
+          (const char*)mDeviceName);
+    return NO_ERROR;
+  }
+  if (isStarted()) {
+    ALOGE("%s: Cannot disconnect from the started device '%s.", __FUNCTION__,
+          (const char*)mDeviceName);
+    return EINVAL;
+  }
 
-    /* Disconnect from the camera device via emulator. */
-    const status_t res = mQemuClient.queryDisconnect();
-    if (res == NO_ERROR) {
-        ALOGV("%s: Disonnected from device '%s'",
-             __FUNCTION__, (const char*)mDeviceName);
-        mState = ECDS_INITIALIZED;
-    } else {
-        ALOGE("%s: Disconnection from device '%s' failed",
-             __FUNCTION__, (const char*)mDeviceName);
-    }
+  /* Disconnect from the camera device via emulator. */
+  const status_t res = mQemuClient.queryDisconnect();
+  if (res == NO_ERROR) {
+    ALOGV("%s: Disonnected from device '%s'", __FUNCTION__,
+          (const char*)mDeviceName);
+    mState = ECDS_INITIALIZED;
+  } else {
+    ALOGE("%s: Disconnection from device '%s' failed", __FUNCTION__,
+          (const char*)mDeviceName);
+  }
 
-    return res;
+  return res;
 }
 
-status_t EmulatedQemuCameraDevice::startDevice(int width,
-                                               int height,
-                                               uint32_t pix_fmt,
-                                               int fps)
-{
-    ALOGV("%s", __FUNCTION__);
+status_t EmulatedQemuCameraDevice::startDevice(int width, int height,
+                                               uint32_t pix_fmt, int fps) {
+  ALOGV("%s", __FUNCTION__);
 
-    Mutex::Autolock locker(&mObjectLock);
-    if (!isConnected()) {
-        ALOGE("%s: Qemu camera device '%s' is not connected.",
-             __FUNCTION__, (const char*)mDeviceName);
-        return EINVAL;
-    }
-    if (isStarted()) {
-        ALOGW("%s: Qemu camera device '%s' is already started.",
-             __FUNCTION__, (const char*)mDeviceName);
-        return NO_ERROR;
-    }
+  Mutex::Autolock locker(&mObjectLock);
+  if (!isConnected()) {
+    ALOGE("%s: Qemu camera device '%s' is not connected.", __FUNCTION__,
+          (const char*)mDeviceName);
+    return EINVAL;
+  }
+  if (isStarted()) {
+    ALOGW("%s: Qemu camera device '%s' is already started.", __FUNCTION__,
+          (const char*)mDeviceName);
+    return NO_ERROR;
+  }
 
-    status_t res = EmulatedCameraDevice::commonStartDevice(
-        width, height, pix_fmt, fps);
-    if (res != NO_ERROR) {
-        ALOGE("%s: commonStartDevice failed", __FUNCTION__);
-        return res;
-    }
+  status_t res =
+      EmulatedCameraDevice::commonStartDevice(width, height, pix_fmt, fps);
+  if (res != NO_ERROR) {
+    ALOGE("%s: commonStartDevice failed", __FUNCTION__);
+    return res;
+  }
 
-    /* Allocate preview frame buffer. */
-    /* TODO: Watch out for preview format changes! At this point we implement
-     * RGB32 only.*/
-    mPreviewFrame = new uint32_t[mTotalPixels];
+  /* Allocate preview frame buffer. */
+  /* TODO: Watch out for preview format changes! At this point we implement
+   * RGB32 only. */
+  mPreviewFrame = new uint32_t[mTotalPixels];
+  if (mPreviewFrame == NULL) {
+    ALOGE("%s: Unable to allocate %d bytes for preview frame", __FUNCTION__,
+          mTotalPixels);
+    return ENOMEM;
+  }
+
+  /* Start the actual camera device. */
+  res = mQemuClient.queryStart(mPixelFormat, mFrameWidth, mFrameHeight);
+  if (res == NO_ERROR) {
+    ALOGV("%s: Qemu camera device '%s' is started for %.4s[%dx%d] frames",
+          __FUNCTION__, (const char*)mDeviceName,
+          reinterpret_cast<const char*>(&mPixelFormat), mFrameWidth,
+          mFrameHeight);
+    mState = ECDS_STARTED;
+  } else {
+    ALOGE("%s: Unable to start device '%s' for %.4s[%dx%d] frames",
+          __FUNCTION__, (const char*)mDeviceName,
+          reinterpret_cast<const char*>(&pix_fmt), width, height);
+  }
+
+  return res;
+}
+
+status_t EmulatedQemuCameraDevice::stopDevice() {
+  ALOGV("%s", __FUNCTION__);
+
+  Mutex::Autolock locker(&mObjectLock);
+  if (!isStarted()) {
+    ALOGW("%s: Qemu camera device '%s' is not started.", __FUNCTION__,
+          (const char*)mDeviceName);
+    return NO_ERROR;
+  }
+
+  /* Stop the actual camera device. */
+  status_t res = mQemuClient.queryStop();
+  if (res == NO_ERROR) {
     if (mPreviewFrame == NULL) {
-        ALOGE("%s: Unable to allocate %d bytes for preview frame",
-             __FUNCTION__, mTotalPixels);
-        return ENOMEM;
+      delete[] mPreviewFrame;
+      mPreviewFrame = NULL;
     }
+    EmulatedCameraDevice::commonStopDevice();
+    mState = ECDS_CONNECTED;
+    ALOGV("%s: Qemu camera device '%s' is stopped", __FUNCTION__,
+          (const char*)mDeviceName);
+  } else {
+    ALOGE("%s: Unable to stop device '%s'", __FUNCTION__,
+          (const char*)mDeviceName);
+  }
 
-    /* Start the actual camera device. */
-    res = mQemuClient.queryStart(mPixelFormat, mFrameWidth, mFrameHeight);
-    if (res == NO_ERROR) {
-        ALOGV("%s: Qemu camera device '%s' is started for %.4s[%dx%d] frames",
-             __FUNCTION__, (const char*)mDeviceName,
-             reinterpret_cast<const char*>(&mPixelFormat),
-             mFrameWidth, mFrameHeight);
-        mState = ECDS_STARTED;
-    } else {
-        ALOGE("%s: Unable to start device '%s' for %.4s[%dx%d] frames",
-             __FUNCTION__, (const char*)mDeviceName,
-             reinterpret_cast<const char*>(&pix_fmt), width, height);
-    }
-
-    return res;
-}
-
-status_t EmulatedQemuCameraDevice::stopDevice()
-{
-    ALOGV("%s", __FUNCTION__);
-
-    Mutex::Autolock locker(&mObjectLock);
-    if (!isStarted()) {
-        ALOGW("%s: Qemu camera device '%s' is not started.",
-             __FUNCTION__, (const char*)mDeviceName);
-        return NO_ERROR;
-    }
-
-    /* Stop the actual camera device. */
-    status_t res = mQemuClient.queryStop();
-    if (res == NO_ERROR) {
-        if (mPreviewFrame == NULL) {
-            delete[] mPreviewFrame;
-            mPreviewFrame = NULL;
-        }
-        EmulatedCameraDevice::commonStopDevice();
-        mState = ECDS_CONNECTED;
-        ALOGV("%s: Qemu camera device '%s' is stopped",
-             __FUNCTION__, (const char*)mDeviceName);
-    } else {
-        ALOGE("%s: Unable to stop device '%s'",
-             __FUNCTION__, (const char*)mDeviceName);
-    }
-
-    return res;
+  return res;
 }
 
 /****************************************************************************
  * EmulatedCameraDevice virtual overrides
  ***************************************************************************/
 
-status_t EmulatedQemuCameraDevice::getCurrentPreviewFrame(void* buffer)
-{
-    ALOGW_IF(mPreviewFrame == NULL, "%s: No preview frame", __FUNCTION__);
-    if (mPreviewFrame != NULL) {
-        memcpy(buffer, mPreviewFrame, mTotalPixels * 4);
-        return 0;
-    } else {
-        return EmulatedCameraDevice::getCurrentPreviewFrame(buffer);
-    }
+status_t EmulatedQemuCameraDevice::getCurrentPreviewFrame(void* buffer) {
+  ALOGW_IF(mPreviewFrame == NULL, "%s: No preview frame", __FUNCTION__);
+  if (mPreviewFrame != NULL) {
+    memcpy(buffer, mPreviewFrame, mTotalPixels * 4);
+    return 0;
+  } else {
+    return EmulatedCameraDevice::getCurrentPreviewFrame(buffer);
+  }
 }
 
 /****************************************************************************
  * Worker thread management overrides.
  ***************************************************************************/
 
-bool EmulatedQemuCameraDevice::inWorkerThread()
-{
-    /* Wait till FPS timeout expires, or thread exit message is received. */
-    WorkerThread::SelectRes res =
-        getWorkerThread()->Select(-1, 1000000 / mEmulatedFPS);
-    if (res == WorkerThread::EXIT_THREAD) {
-        ALOGV("%s: Worker thread has been terminated.", __FUNCTION__);
-        return false;
-    }
+bool EmulatedQemuCameraDevice::inWorkerThread() {
+  /* Wait until the FPS timeout expires or a thread exit message arrives. */
+  WorkerThread::SelectRes res =
+      getWorkerThread()->Select(-1, 1000000 / mEmulatedFPS);
+  if (res == WorkerThread::EXIT_THREAD) {
+    ALOGV("%s: Worker thread has been terminated.", __FUNCTION__);
+    return false;
+  }
 
-    /* Query frames from the service. */
-    status_t query_res = mQemuClient.queryFrame(mCurrentFrame, mPreviewFrame,
-                                                 mFrameBufferSize,
-                                                 mTotalPixels * 4,
-                                                 mWhiteBalanceScale[0],
-                                                 mWhiteBalanceScale[1],
-                                                 mWhiteBalanceScale[2],
-                                                 mExposureCompensation);
-    if (query_res == NO_ERROR) {
-        /* Timestamp the current frame, and notify the camera HAL. */
-        mCurFrameTimestamp = systemTime(SYSTEM_TIME_MONOTONIC);
-        mCameraHAL->onNextFrameAvailable(mCurrentFrame, mCurFrameTimestamp, this);
-        return true;
-    } else {
-        ALOGE("%s: Unable to get current video frame: %s",
-             __FUNCTION__, strerror(query_res));
-        mCameraHAL->onCameraDeviceError(CAMERA_ERROR_SERVER_DIED);
-        return false;
-    }
+  /* Query frames from the service. */
+  status_t query_res = mQemuClient.queryFrame(
+      mCurrentFrame, mPreviewFrame, mFrameBufferSize, mTotalPixels * 4,
+      mWhiteBalanceScale[0], mWhiteBalanceScale[1], mWhiteBalanceScale[2],
+      mExposureCompensation);
+  if (query_res == NO_ERROR) {
+    /* Timestamp the current frame, and notify the camera HAL. */
+    mCurFrameTimestamp = systemTime(SYSTEM_TIME_MONOTONIC);
+    mCameraHAL->onNextFrameAvailable(mCurrentFrame, mCurFrameTimestamp, this);
+    return true;
+  } else {
+    ALOGE("%s: Unable to get current video frame: %s", __FUNCTION__,
+          strerror(query_res));
+    mCameraHAL->onCameraDeviceError(CAMERA_ERROR_SERVER_DIED);
+    return false;
+  }
 }
 
 }; /* namespace android */
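The worker loop above paces itself by blocking for one frame period per
iteration before pulling and publishing a frame. A minimal sketch of that
pattern follows; queryFrame() and publishFrame() are made-up stand-ins for
the QemuClient query and the HAL's onNextFrameAvailable callback.

#include <atomic>
#include <chrono>
#include <cstdio>
#include <thread>

static const int kEmulatedFps = 50;        // mirrors mEmulatedFPS above
static std::atomic<bool> gExit{false};

static bool queryFrame() { return true; }  // placeholder for the service call
static void publishFrame() { std::puts("frame"); }  // placeholder callback

static void workerLoop() {
  const auto period = std::chrono::microseconds(1000000 / kEmulatedFps);
  while (!gExit.load()) {
    std::this_thread::sleep_for(period);   // ~ WorkerThread::Select() timeout
    if (!queryFrame()) return;             // device error: stop the thread
    publishFrame();
  }
}

int main() {
  std::thread t(workerLoop);
  std::this_thread::sleep_for(std::chrono::milliseconds(100));
  gExit = true;
  t.join();
}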
diff --git a/guest/hals/camera/EmulatedQemuCameraDevice.h b/guest/hals/camera/EmulatedQemuCameraDevice.h
index 6664f44..2e4770b 100644
--- a/guest/hals/camera/EmulatedQemuCameraDevice.h
+++ b/guest/hals/camera/EmulatedQemuCameraDevice.h
@@ -32,90 +32,90 @@
 /* Encapsulates an emulated camera device connected to the host.
  */
 class EmulatedQemuCameraDevice : public EmulatedCameraDevice {
-public:
-    /* Constructs EmulatedQemuCameraDevice instance. */
-    explicit EmulatedQemuCameraDevice(EmulatedQemuCamera* camera_hal);
+ public:
+  /* Constructs EmulatedQemuCameraDevice instance. */
+  explicit EmulatedQemuCameraDevice(EmulatedQemuCamera* camera_hal);
 
-    /* Destructs EmulatedQemuCameraDevice instance. */
-    ~EmulatedQemuCameraDevice();
+  /* Destructs EmulatedQemuCameraDevice instance. */
+  ~EmulatedQemuCameraDevice();
 
-    /***************************************************************************
-     * Public API
-     **************************************************************************/
+  /***************************************************************************
+   * Public API
+   **************************************************************************/
 
-public:
-    /* Initializes EmulatedQemuCameraDevice instance.
-     * Param:
-     *  device_name - Name of the camera device connected to the host. The name
-     *      that is used here must have been reported by the 'factory' camera
-     *      service when it listed camera devices connected to the host.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status.
-     */
-    status_t Initialize(const char* device_name);
+ public:
+  /* Initializes EmulatedQemuCameraDevice instance.
+   * Param:
+   *  device_name - Name of the camera device connected to the host. The name
+   *      that is used here must have been reported by the 'factory' camera
+   *      service when it listed camera devices connected to the host.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status.
+   */
+  status_t Initialize(const char* device_name);
 
-    /***************************************************************************
-     * Emulated camera device abstract interface implementation.
-     * See declarations of these methods in EmulatedCameraDevice class for
-     * information on each of these methods.
-     **************************************************************************/
+  /***************************************************************************
+   * Emulated camera device abstract interface implementation.
+   * See declarations of these methods in EmulatedCameraDevice class for
+   * information on each of these methods.
+   **************************************************************************/
 
-public:
-    /* Connects to the camera device. */
-    status_t connectDevice();
+ public:
+  /* Connects to the camera device. */
+  status_t connectDevice();
 
-    /* Disconnects from the camera device. */
-    status_t disconnectDevice();
+  /* Disconnects from the camera device. */
+  status_t disconnectDevice();
 
-    /* Starts capturing frames from the camera device. */
-    status_t startDevice(int width, int height, uint32_t pix_fmt, int fps);
+  /* Starts capturing frames from the camera device. */
+  status_t startDevice(int width, int height, uint32_t pix_fmt, int fps);
 
-    /* Stops capturing frames from the camera device. */
-    status_t stopDevice();
+  /* Stops capturing frames from the camera device. */
+  status_t stopDevice();
 
-    /***************************************************************************
-     * EmulatedCameraDevice virtual overrides
-     * See declarations of these methods in EmulatedCameraDevice class for
-     * information on each of these methods.
-     **************************************************************************/
+  /***************************************************************************
+   * EmulatedCameraDevice virtual overrides
+   * See declarations of these methods in EmulatedCameraDevice class for
+   * information on each of these methods.
+   **************************************************************************/
 
-public:
-    /* Gets current preview fame into provided buffer.
-     * We override this method in order to provide preview frames cached in this
-     * object.
-     */
-    status_t getCurrentPreviewFrame(void* buffer);
+ public:
+  /* Gets the current preview frame into the provided buffer.
+   * We override this method in order to provide preview frames cached in this
+   * object.
+   */
+  status_t getCurrentPreviewFrame(void* buffer);
 
-    /***************************************************************************
-     * Worker thread management overrides.
-     * See declarations of these methods in EmulatedCameraDevice class for
-     * information on each of these methods.
-     **************************************************************************/
+  /***************************************************************************
+   * Worker thread management overrides.
+   * See declarations of these methods in EmulatedCameraDevice class for
+   * information on each of these methods.
+   **************************************************************************/
 
-protected:
-    /* Implementation of the worker thread routine. */
-    bool inWorkerThread();
+ protected:
+  /* Implementation of the worker thread routine. */
+  bool inWorkerThread();
 
-    /***************************************************************************
-     * Qemu camera device data members
-     **************************************************************************/
+  /***************************************************************************
+   * Qemu camera device data members
+   **************************************************************************/
 
-private:
-    /* Qemu client that is used to communicate with the 'emulated camera'
-     * service, created for this instance in the emulator. */
-    CameraQemuClient    mQemuClient;
+ private:
+  /* Qemu client that is used to communicate with the 'emulated camera'
+   * service, created for this instance in the emulator. */
+  CameraQemuClient mQemuClient;
 
-    /* Name of the camera device connected to the host. */
-    String8             mDeviceName;
+  /* Name of the camera device connected to the host. */
+  String8 mDeviceName;
 
-    /* Current preview framebuffer. */
-    uint32_t*           mPreviewFrame;
+  /* Current preview framebuffer. */
+  uint32_t* mPreviewFrame;
 
-    /* Emulated FPS (frames per second).
-     * We will emulate 50 FPS. */
-    static const int    mEmulatedFPS = 50;
+  /* Emulated FPS (frames per second).
+   * We will emulate 50 FPS. */
+  static const int mEmulatedFPS = 50;
 };
 
 }; /* namespace android */
 
-#endif  /* HW_EMULATOR_CAMERA_EMULATED_QEMU_CAMERA_DEVICE_H */
+#endif /* HW_EMULATOR_CAMERA_EMULATED_QEMU_CAMERA_DEVICE_H */
diff --git a/guest/hals/camera/ExifMetadataBuilder.cpp b/guest/hals/camera/ExifMetadataBuilder.cpp
index 71eaebc..084f029 100644
--- a/guest/hals/camera/ExifMetadataBuilder.cpp
+++ b/guest/hals/camera/ExifMetadataBuilder.cpp
@@ -4,22 +4,22 @@
 #define LOG_TAG "ExifMetadataBuilder"
 #include <cutils/log.h>
 
-#include <cmath>
 #include <stdlib.h>
+#include <cmath>
 
 namespace android {
 // All supported EXIF data types.
 enum ExifDataType {
-  ExifUInt8     = 1,
-  ExifString    = 2,
-  ExifUInt16    = 3,
-  ExifUInt32    = 4,
-  ExifRational  = 5,
+  ExifUInt8 = 1,
+  ExifString = 2,
+  ExifUInt16 = 3,
+  ExifUInt32 = 4,
+  ExifRational = 5,
   ExifUndefined = 7,
-  ExifSInt16    = 8,
-  ExifSInt32    = 9,
-  ExifFloat     = 11,
-  ExifDouble    = 12,
+  ExifSInt16 = 8,
+  ExifSInt32 = 9,
+  ExifFloat = 11,
+  ExifDouble = 12,
 };
 
 enum ExifTagId {
@@ -84,9 +84,9 @@
     // - uint16_t: mTags.size();
     // - ExifTagInfo[mTags.size()]
     // - uint32_t: next_structure_available ? self_offset + Size() : NULL
-    return sizeof(uint16_t)                       // mTags.size()
-           + mTags.size() * sizeof(ExifTagInfo)   // [tags]
-           + sizeof(uint32_t);                    // next offset
+    return sizeof(uint16_t)                      // mTags.size()
+           + mTags.size() * sizeof(ExifTagInfo)  // [tags]
+           + sizeof(uint32_t);                   // next offset
   }
 
   size_t DataSize() {
@@ -97,14 +97,10 @@
     return data_size;
   }
 
-  size_t Size() {
-    return TagSize() + DataSize();
-  }
+  size_t Size() { return TagSize() + DataSize(); }
 
-  uint32_t Build(
-      uint8_t* buffer,
-      const uint32_t self_offset,
-      const bool next_structure_available) {
+  uint32_t Build(uint8_t* buffer, const uint32_t self_offset,
+                 const bool next_structure_available) {
     // Write number of items.
     uint16_t num_elements = mTags.size();
     memcpy(buffer, &num_elements, sizeof(num_elements));
@@ -138,15 +134,12 @@
     return offset;
   }
 
-  void PushTag(ExifTag* tag) {
-    mTags.push_back(tag);
-  }
+  void PushTag(ExifTag* tag) { mTags.push_back(tag); }
 
  private:
   TagMap mTags;
 };
 
-
 // EXIF tags.
 namespace {
 // Tag with 8-bit unsigned integer.
@@ -187,9 +180,7 @@
 class ExifCharArrayTag : public ExifTag {
  public:
   ExifCharArrayTag(uint16_t tag, const char (&type)[8], const std::string& str)
-      : mTag(tag),
-        mType(type),
-        mString(str) {}
+      : mTag(tag), mType(type), mString(str) {}
 
   void AppendTag(ExifTagInfo* info, size_t data_offset) {
     info->tag = mTag;
@@ -198,9 +189,7 @@
     info->value = data_offset;
   }
 
-  size_t DataSize() {
-    return sizeof(mType) + mString.size();
-  }
+  size_t DataSize() { return sizeof(mType) + mString.size(); }
 
   void AppendData(uint8_t* data) {
     memcpy(data, mType, sizeof(mType));
@@ -218,13 +207,9 @@
 class ExifPointerTag : public ExifTag {
  public:
   ExifPointerTag(uint16_t tag, void* data, int size)
-      : mTag(tag),
-        mData(data),
-        mSize(size) {}
+      : mTag(tag), mData(data), mSize(size) {}
 
-  ~ExifPointerTag() {
-    free(mData);
-  }
+  ~ExifPointerTag() { free(mData); }
 
   void AppendTag(ExifTagInfo* info, size_t data_offset) {
     info->tag = mTag;
@@ -233,13 +218,9 @@
     info->value = data_offset;
   }
 
-  size_t DataSize() {
-    return mSize;
-  }
+  size_t DataSize() { return mSize; }
 
-  void AppendData(uint8_t* data) {
-    memcpy(data, mData, mSize);
-  }
+  void AppendData(uint8_t* data) { memcpy(data, mData, mSize); }
 
  private:
   uint16_t mTag;
@@ -251,8 +232,7 @@
 class ExifStringTag : public ExifTag {
  public:
   ExifStringTag(uint16_t tag, const std::string& str)
-      : mTag(tag),
-        mString(str) {}
+      : mTag(tag), mString(str) {}
 
   void AppendTag(ExifTagInfo* info, size_t data_offset) {
     info->tag = mTag;
@@ -266,9 +246,7 @@
     return mString.size() + 1;
   }
 
-  void AppendData(uint8_t* data) {
-    memcpy(data, mString.data(), DataSize());
-  }
+  void AppendData(uint8_t* data) { memcpy(data, mString.data(), DataSize()); }
 
  private:
   uint16_t mTag;
@@ -278,13 +256,9 @@
 // SubIFD: sub-tags.
 class ExifSubIfdTag : public ExifTag {
  public:
-  ExifSubIfdTag(uint16_t tag)
-      : mTag(tag),
-        mSubStructure(new ExifStructure) {}
+  ExifSubIfdTag(uint16_t tag) : mTag(tag), mSubStructure(new ExifStructure) {}
 
-  ~ExifSubIfdTag() {
-    delete mSubStructure;
-  }
+  ~ExifSubIfdTag() { delete mSubStructure; }
 
   void AppendTag(ExifTagInfo* info, size_t data_offset) {
     info->tag = mTag;
@@ -294,17 +268,13 @@
     mDataOffset = data_offset;
   }
 
-  size_t DataSize() {
-    return mSubStructure->Size();
-  }
+  size_t DataSize() { return mSubStructure->Size(); }
 
   void AppendData(uint8_t* data) {
     mSubStructure->Build(data, mDataOffset, false);
   }
 
-  ExifStructure* GetSubStructure() {
-    return mSubStructure;
-  }
+  ExifStructure* GetSubStructure() { return mSubStructure; }
 
  private:
   uint16_t mTag;
@@ -315,26 +285,23 @@
 // Unsigned rational tag.
 class ExifURationalTag : public ExifTag {
  public:
-  ExifURationalTag(uint16_t tag, double value)
-      : mTag(tag),
-        mCount(1) {
-    DoubleToRational(value,
-                     &mRationals[0].mNumerator, &mRationals[0].mDenominator);
+  ExifURationalTag(uint16_t tag, double value) : mTag(tag), mCount(1) {
+    DoubleToRational(value, &mRationals[0].mNumerator,
+                     &mRationals[0].mDenominator);
   }
 
   ExifURationalTag(uint16_t tag, double value1, double value2, double value3)
-      : mTag(tag),
-        mCount(3) {
-    DoubleToRational(value1,
-                     &mRationals[0].mNumerator, &mRationals[0].mDenominator);
-    DoubleToRational(value2,
-                     &mRationals[1].mNumerator, &mRationals[1].mDenominator);
-    DoubleToRational(value3,
-                     &mRationals[2].mNumerator, &mRationals[2].mDenominator);
+      : mTag(tag), mCount(3) {
+    DoubleToRational(value1, &mRationals[0].mNumerator,
+                     &mRationals[0].mDenominator);
+    DoubleToRational(value2, &mRationals[1].mNumerator,
+                     &mRationals[1].mDenominator);
+    DoubleToRational(value3, &mRationals[2].mNumerator,
+                     &mRationals[2].mDenominator);
   }
 
-  void DoubleToRational(double value,
-                        int32_t* numerator, int32_t* denominator) {
+  void DoubleToRational(double value, int32_t* numerator,
+                        int32_t* denominator) {
     int sign = 1;
     if (value < 0) {
       sign = -sign;
@@ -360,13 +327,9 @@
     info->value = data_offset;
   }
 
-  size_t DataSize() {
-    return sizeof(mRationals[0]) * mCount;
-  }
+  size_t DataSize() { return sizeof(mRationals[0]) * mCount; }
 
-  void AppendData(uint8_t* data) {
-    memcpy(data, &mRationals[0], DataSize());
-  }
+  void AppendData(uint8_t* data) { memcpy(data, &mRationals[0], DataSize()); }
 
  private:
   static const int kMaxSupportedRationals = 3;
@@ -380,7 +343,7 @@
 
 std::string ToAsciiDate(time_t time) {
   struct tm loc;
-  char res[12]; // YYYY:MM:DD\0\0
+  char res[12];  // YYYY:MM:DD\0\0
   localtime_r(&time, &loc);
   strftime(res, sizeof(res), "%Y:%m:%d", &loc);
   return res;
@@ -388,7 +351,7 @@
 
 std::string ToAsciiTime(time_t time) {
   struct tm loc;
-  char res[10]; // HH:MM:SS\0\0
+  char res[10];  // HH:MM:SS\0\0
   localtime_r(&time, &loc);
   strftime(res, sizeof(res), "%H:%M:%S", &loc);
   return res;
@@ -397,8 +360,7 @@
 }  // namespace
 
 ExifMetadataBuilder::ExifMetadataBuilder()
-    : mImageIfd(new ExifStructure),
-      mThumbnailIfd(new ExifStructure) {
+    : mImageIfd(new ExifStructure), mThumbnailIfd(new ExifStructure) {
   // Mandatory tag: camera details.
   ExifSubIfdTag* sub_ifd = new ExifSubIfdTag(kExifTagCameraSubIFD);
   // Pass ownership to mImageIfd.
@@ -454,8 +416,8 @@
   int minutes = latitude;
   latitude = (latitude - minutes) * 60.;
   double seconds = latitude;
-  mGpsSubIfd->PushTag(new ExifURationalTag(kExifTagGpsLatitude,
-                                           degrees, minutes, seconds));
+  mGpsSubIfd->PushTag(
+      new ExifURationalTag(kExifTagGpsLatitude, degrees, minutes, seconds));
 }
 
 void ExifMetadataBuilder::SetGpsLongitude(double longitude) {
@@ -469,8 +431,8 @@
   int32_t minutes = longitude;
   longitude = (longitude - minutes) * 60.;
   double seconds = longitude;
-  mGpsSubIfd->PushTag(new ExifURationalTag(kExifTagGpsLongitude,
-                                           degrees, minutes, seconds));
+  mGpsSubIfd->PushTag(
+      new ExifURationalTag(kExifTagGpsLongitude, degrees, minutes, seconds));
 }
 
 void ExifMetadataBuilder::SetGpsAltitude(double altitude) {
@@ -479,8 +441,8 @@
 }
 
 void ExifMetadataBuilder::SetGpsProcessingMethod(const std::string& method) {
-  mGpsSubIfd->PushTag(new ExifCharArrayTag(
-      kExifTagGpsProcessingMethod, kExifCharArrayAscii, method));
+  mGpsSubIfd->PushTag(new ExifCharArrayTag(kExifTagGpsProcessingMethod,
+                                           kExifCharArrayAscii, method));
 }
 
 void ExifMetadataBuilder::SetGpsDateTime(time_t timestamp) {
@@ -489,10 +451,9 @@
   timestamp /= 60;
   int32_t minutes = (timestamp % 60);
   timestamp /= 60;
-  mGpsSubIfd->PushTag(new ExifURationalTag(
-      kExifTagGpsTimestamp, timestamp % 24, minutes, seconds));
-  mGpsSubIfd->PushTag(new ExifStringTag(
-      kExifTagGpsDatestamp, date));
+  mGpsSubIfd->PushTag(new ExifURationalTag(kExifTagGpsTimestamp, timestamp % 24,
+                                           minutes, seconds));
+  mGpsSubIfd->PushTag(new ExifStringTag(kExifTagGpsDatestamp, date));
 }
 
 void ExifMetadataBuilder::SetLensFocalLength(double length) {
@@ -502,22 +463,26 @@
 
 void ExifMetadataBuilder::Build() {
   const uint8_t exif_header[] = {
-    'E', 'x', 'i', 'f', 0, 0,  // EXIF header.
+      'E', 'x', 'i', 'f', 0, 0,  // EXIF header.
   };
 
   const uint8_t tiff_header[] = {
-    'I', 'I', 0x2a, 0x00,      // TIFF Little endian header.
+      'I', 'I', 0x2a, 0x00,  // TIFF Little endian header.
   };
 
   // EXIF data should be exactly this much.
   size_t exif_size = sizeof(exif_header) + sizeof(tiff_header) +
-      sizeof(uint32_t) + // Offset of the following descriptors.
-      mImageIfd->Size() + mThumbnailIfd->Size();
+                     sizeof(uint32_t) +  // Offset of the following descriptors.
+                     mImageIfd->Size() + mThumbnailIfd->Size();
 
   const uint8_t marker[] = {
-    0xff, 0xd8, 0xff, 0xe1,          // EXIF marker.
-    uint8_t((exif_size + 2) >> 8),   // Data length (including the length field)
-    uint8_t((exif_size + 2) & 0xff),
+      0xff,
+      0xd8,
+      0xff,
+      0xe1,  // EXIF marker.
+      uint8_t((exif_size + 2) >>
+              8),  // Data length (including the length field)
+      uint8_t((exif_size + 2) & 0xff),
   };
 
   // Reserve data for our exif info.
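The GPS setters above split a decimal coordinate into degrees, minutes, and
seconds before emitting three unsigned rationals. A standalone worked sketch
of that split; toDms is a hypothetical helper, and the sign is assumed to be
carried by the separate N/S or E/W reference tag.

#include <cmath>
#include <cstdio>

// Split a decimal coordinate into degrees, minutes, and decimal seconds.
static void toDms(double v, int* deg, int* min, double* sec) {
  v = std::fabs(v);  // sign travels in the reference tag, not here
  *deg = static_cast<int>(v);
  v = (v - *deg) * 60.0;
  *min = static_cast<int>(v);
  *sec = (v - *min) * 60.0;
}

int main() {
  int d, m;
  double s;
  toDms(37.7749, &d, &m, &s);
  std::printf("%d deg %d min %.2f sec\n", d, m, s);  // 37 deg 46 min 29.64 sec
}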
diff --git a/guest/hals/camera/ExifMetadataBuilder.h b/guest/hals/camera/ExifMetadataBuilder.h
index 6180c45..57986ba 100644
--- a/guest/hals/camera/ExifMetadataBuilder.h
+++ b/guest/hals/camera/ExifMetadataBuilder.h
@@ -36,9 +36,7 @@
   void SetLensFocalLength(double length);
   void Build();
 
-  const AutoFreeBuffer& Buffer() {
-    return mData;
-  }
+  const AutoFreeBuffer& Buffer() { return mData; }
 
  private:
   ExifStructure* mImageIfd;
@@ -49,7 +47,7 @@
   AutoFreeBuffer mData;
 
   ExifMetadataBuilder(const ExifMetadataBuilder&);
-  ExifMetadataBuilder& operator= (const ExifMetadataBuilder&);
+  ExifMetadataBuilder& operator=(const ExifMetadataBuilder&);
 };
 
 }  // namespace android
diff --git a/guest/hals/camera/GrallocModule.h b/guest/hals/camera/GrallocModule.h
index ad1eab4..2033e8d 100644
--- a/guest/hals/camera/GrallocModule.h
+++ b/guest/hals/camera/GrallocModule.h
@@ -18,23 +18,21 @@
 
 #include <hardware/gralloc.h>
 
-class GrallocModule
-{
-public:
+class GrallocModule {
+ public:
   static GrallocModule &getInstance() {
     static GrallocModule instance;
     return instance;
   }
 
-  int lock(buffer_handle_t handle,
-      int usage, int l, int t, int w, int h, void **vaddr) {
+  int lock(buffer_handle_t handle, int usage, int l, int t, int w, int h,
+           void **vaddr) {
     return mModule->lock(mModule, handle, usage, l, t, w, h, vaddr);
   }
 
 #ifdef GRALLOC_MODULE_API_VERSION_0_2
-  int lock_ycbcr(buffer_handle_t handle,
-      int usage, int l, int t, int w, int h,
-      struct android_ycbcr *ycbcr) {
+  int lock_ycbcr(buffer_handle_t handle, int usage, int l, int t, int w, int h,
+                 struct android_ycbcr *ycbcr) {
     return mModule->lock_ycbcr(mModule, handle, usage, l, t, w, h, ycbcr);
   }
 #endif
@@ -43,14 +41,14 @@
     return mModule->unlock(mModule, handle);
   }
 
-private:
+ private:
   GrallocModule() {
     const hw_module_t *module = NULL;
     int ret = hw_get_module(GRALLOC_HARDWARE_MODULE_ID, &module);
     if (ret) {
       ALOGE("%s: Failed to get gralloc module: %d", __FUNCTION__, ret);
     }
-    mModule = reinterpret_cast<const gralloc_module_t*>(module);
+    mModule = reinterpret_cast<const gralloc_module_t *>(module);
   }
   const gralloc_module_t *mModule;
 };
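A hypothetical caller of the singleton above, for orientation only: the
handle, dimensions, usage flag, and the RGBA8888 (4 bytes per pixel)
assumption are all illustrative, and real buffers may have a stride wider
than w.

#include <cstring>

#include "GrallocModule.h"  // the header above

void fillBuffer(buffer_handle_t handle, int w, int h) {
  void *vaddr = NULL;
  if (GrallocModule::getInstance().lock(handle, GRALLOC_USAGE_SW_WRITE_OFTEN,
                                        0, 0, w, h, &vaddr) == 0 &&
      vaddr != NULL) {
    // Zero-fill assuming 4 bytes per pixel; ignores stride for brevity.
    memset(vaddr, 0, static_cast<size_t>(w) * h * 4);
    GrallocModule::getInstance().unlock(handle);
  }
}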
diff --git a/guest/hals/camera/ImageMetadata.h b/guest/hals/camera/ImageMetadata.h
index 56020f2..61a3adb 100644
--- a/guest/hals/camera/ImageMetadata.h
+++ b/guest/hals/camera/ImageMetadata.h
@@ -1,22 +1,22 @@
 #ifndef IMAGEMETADATA_H_
 #define IMAGEMETADATA_H_
 
-#include <string>
 #include <stdint.h>
+#include <string>
 
 extern "C" {
 /* Describes various attributes of the picture. */
 struct ImageMetadata {
-    int mWidth;
-    int mHeight;
-    int mThumbnailWidth;
-    int mThumbnailHeight;
-    double mLensFocalLength;
-    double mGpsLatitude;
-    double mGpsLongitude;
-    double mGpsAltitude;
-    time_t mGpsTimestamp;
-    std::string mGpsProcessingMethod;
+  int mWidth;
+  int mHeight;
+  int mThumbnailWidth;
+  int mThumbnailHeight;
+  double mLensFocalLength;
+  double mGpsLatitude;
+  double mGpsLongitude;
+  double mGpsAltitude;
+  time_t mGpsTimestamp;
+  std::string mGpsProcessingMethod;
 };
 }
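A small usage sketch for the struct above, showing how a caller might
populate it before handing it to the JPEG compressor; every value is a
placeholder chosen for illustration.

#include "ImageMetadata.h"  // the header above

ImageMetadata makeMeta() {
  ImageMetadata meta;
  meta.mWidth = 640;
  meta.mHeight = 480;
  meta.mThumbnailWidth = 160;
  meta.mThumbnailHeight = 120;
  meta.mLensFocalLength = 4.3;  // millimeters, illustrative
  meta.mGpsLatitude = 37.7749;
  meta.mGpsLongitude = -122.4194;
  meta.mGpsAltitude = 16.0;
  meta.mGpsTimestamp = 0;  // epoch seconds; placeholder
  meta.mGpsProcessingMethod = "GPS";
  return meta;
}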
 
diff --git a/guest/hals/camera/JpegCompressor.cpp b/guest/hals/camera/JpegCompressor.cpp
index 8ee069f..67d71ee 100644
--- a/guest/hals/camera/JpegCompressor.cpp
+++ b/guest/hals/camera/JpegCompressor.cpp
@@ -21,75 +21,70 @@
 
 #define LOG_NDEBUG 0
 #define LOG_TAG "EmulatedCamera_JPEG"
-#include <cutils/log.h>
-#include <assert.h>
-#include <dlfcn.h>
 #include "JpegCompressor.h"
+#include <assert.h>
+#include <cutils/log.h>
+#include <dlfcn.h>
 
 namespace android {
 
 void* NV21JpegCompressor::mDl = NULL;
 
 static void* getSymbol(void* dl, const char* signature) {
-    void* res = dlsym(dl, signature);
-    assert (res != NULL);
+  void* res = dlsym(dl, signature);
+  assert(res != NULL);
 
-    return res;
+  return res;
 }
 
 typedef void (*InitFunc)(JpegStub* stub, int* strides);
 typedef void (*CleanupFunc)(JpegStub* stub);
-typedef int (*CompressFunc)(JpegStub* stub, const void* image,
-                            int quality, const ImageMetadata* meta);
+typedef int (*CompressFunc)(JpegStub* stub, const void* image, int quality,
+                            const ImageMetadata* meta);
 typedef void (*GetCompressedImageFunc)(JpegStub* stub, void* buff);
 typedef size_t (*GetCompressedSizeFunc)(JpegStub* stub);
 
-NV21JpegCompressor::NV21JpegCompressor()
-{
-    if (mDl == NULL) {
-        mDl = dlopen("/vendor/lib/hw/camera.gce_x86.jpeg.so", RTLD_NOW);
-    }
-    if (mDl == NULL) {
-        mDl = dlopen("/system/lib/hw/camera.gce_x86.jpeg.so", RTLD_NOW);
-    }
-    assert(mDl != NULL);
+NV21JpegCompressor::NV21JpegCompressor() {
+  if (mDl == NULL) {
+    mDl = dlopen("/vendor/lib/hw/camera.gce_x86.jpeg.so", RTLD_NOW);
+  }
+  if (mDl == NULL) {
+    mDl = dlopen("/system/lib/hw/camera.gce_x86.jpeg.so", RTLD_NOW);
+  }
+  assert(mDl != NULL);
 
-    InitFunc f = (InitFunc)getSymbol(mDl, "JpegStub_init");
-    (*f)(&mStub, mStrides);
+  InitFunc f = (InitFunc)getSymbol(mDl, "JpegStub_init");
+  (*f)(&mStub, mStrides);
 }
 
-NV21JpegCompressor::~NV21JpegCompressor()
-{
-    CleanupFunc f = (CleanupFunc)getSymbol(mDl, "JpegStub_cleanup");
-    (*f)(&mStub);
+NV21JpegCompressor::~NV21JpegCompressor() {
+  CleanupFunc f = (CleanupFunc)getSymbol(mDl, "JpegStub_cleanup");
+  (*f)(&mStub);
 }
 
 /****************************************************************************
  * Public API
  ***************************************************************************/
 
-status_t NV21JpegCompressor::compressRawImage(
-    const void* image, const ImageMetadata* meta, int quality)
-{
-    mStrides[0] = meta->mWidth;
-    mStrides[1] = meta->mWidth;
-    CompressFunc f = (CompressFunc)getSymbol(mDl, "JpegStub_compress");
-    return (status_t)(*f)(&mStub, image, quality, meta);
+status_t NV21JpegCompressor::compressRawImage(const void* image,
+                                              const ImageMetadata* meta,
+                                              int quality) {
+  mStrides[0] = meta->mWidth;
+  mStrides[1] = meta->mWidth;
+  CompressFunc f = (CompressFunc)getSymbol(mDl, "JpegStub_compress");
+  return (status_t)(*f)(&mStub, image, quality, meta);
 }
 
-
-size_t NV21JpegCompressor::getCompressedSize()
-{
-    GetCompressedSizeFunc f = (GetCompressedSizeFunc)getSymbol(mDl,
-            "JpegStub_getCompressedSize");
-    return (*f)(&mStub);
+size_t NV21JpegCompressor::getCompressedSize() {
+  GetCompressedSizeFunc f =
+      (GetCompressedSizeFunc)getSymbol(mDl, "JpegStub_getCompressedSize");
+  return (*f)(&mStub);
 }
 
-void NV21JpegCompressor::getCompressedImage(void* buff)
-{
-    GetCompressedImageFunc f = (GetCompressedImageFunc)getSymbol(mDl,
-            "JpegStub_getCompressedImage");
-    (*f)(&mStub, buff);
+void NV21JpegCompressor::getCompressedImage(void* buff) {
+  GetCompressedImageFunc f =
+      (GetCompressedImageFunc)getSymbol(mDl, "JpegStub_getCompressedImage");
+  (*f)(&mStub, buff);
 }
 
 }; /* namespace android */
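
Note: NV21JpegCompressor resolves the JpegStub_* entry points at runtime with
dlopen/dlsym instead of linking against the encoder library directly. A
minimal sketch of the pattern in isolation (library path and symbol name are
made up for illustration):

    #include <dlfcn.h>
    #include <cstdio>

    typedef int (*AddFunc)(int, int);

    int main() {
      // RTLD_NOW resolves all symbols up front, failing early if any is missing.
      void* dl = dlopen("libexample.so", RTLD_NOW);
      if (dl == NULL) {
        fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return 1;
      }
      AddFunc add = (AddFunc)dlsym(dl, "example_add");
      if (add == NULL) {
        fprintf(stderr, "dlsym failed: %s\n", dlerror());
        dlclose(dl);
        return 1;
      }
      printf("2 + 3 = %d\n", add(2, 3));
      dlclose(dl);
      return 0;
    }
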
diff --git a/guest/hals/camera/JpegCompressor.h b/guest/hals/camera/JpegCompressor.h
index e27fd7d..736d9b5 100644
--- a/guest/hals/camera/JpegCompressor.h
+++ b/guest/hals/camera/JpegCompressor.h
@@ -22,73 +22,71 @@
  * converter between YV21, and JPEG formats.
  */
 
-#include "JpegStub.h"
 #include <utils/threads.h>
+#include "JpegStub.h"
 
 namespace android {
 
 /* Encapsulates a converter between YV12, and JPEG formats.
  */
-class NV21JpegCompressor
-{
-public:
-    /* Constructs JpegCompressor instance. */
-    NV21JpegCompressor();
-    /* Destructs JpegCompressor instance. */
-    ~NV21JpegCompressor();
+class NV21JpegCompressor {
+ public:
+  /* Constructs JpegCompressor instance. */
+  NV21JpegCompressor();
+  /* Destructs JpegCompressor instance. */
+  ~NV21JpegCompressor();
 
-    /****************************************************************************
-     * Public API
-     ***************************************************************************/
+  /****************************************************************************
+   * Public API
+   ***************************************************************************/
 
-public:
-    /* Compresses raw NV21 image into a JPEG.
-     * The compressed image will be saved in mStream member of this class. Use
-     * getCompressedSize method to obtain buffer size of the compressed image,
-     * and getCompressedImage to copy out the compressed image.
-     * Param:
-     *  image - Raw NV21 image.
-     *  metadata - Image metadata (dimensions, location etc).
-     *  quality - JPEG quality.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status.
-     *
-     */
-    status_t compressRawImage(const void* image,
-                              const ImageMetadata* metadata,
-                              int quality);
+ public:
+  /* Compresses raw NV21 image into a JPEG.
+   * The compressed image will be saved in mStream member of this class. Use
+   * getCompressedSize method to obtain buffer size of the compressed image,
+   * and getCompressedImage to copy out the compressed image.
+   * Param:
+   *  image - Raw NV21 image.
+   *  metadata - Image metadata (dimensions, location etc).
+   *  quality - JPEG quality.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status.
+   *
+   */
+  status_t compressRawImage(const void* image, const ImageMetadata* metadata,
+                            int quality);
 
-    /* Get size of the compressed JPEG buffer.
-     * This method must be called only after a successful completion of
-     * compressRawImage call.
-     * Return:
-     *  Size of the compressed JPEG buffer.
-     */
-    size_t getCompressedSize();
+  /* Get size of the compressed JPEG buffer.
+   * This method must be called only after a successful completion of
+   * compressRawImage call.
+   * Return:
+   *  Size of the compressed JPEG buffer.
+   */
+  size_t getCompressedSize();
 
-    /* Copies out compressed JPEG buffer.
-     * This method must be called only after a successful completion of
-     * compressRawImage call.
-     * Param:
-     *  buff - Buffer where to copy the JPEG. Must be large enough to contain the
-     *      entire image.
-     */
-    void getCompressedImage(void* buff);
+  /* Copies out compressed JPEG buffer.
+   * This method must be called only after a successful completion of
+   * compressRawImage call.
+   * Param:
+   *  buff - Buffer where to copy the JPEG. Must be large enough to contain the
+   *      entire image.
+   */
+  void getCompressedImage(void* buff);
 
-    /****************************************************************************
-     * Class data
-     ***************************************************************************/
+  /****************************************************************************
+   * Class data
+   ***************************************************************************/
 
-protected:
-    /* Strides for Y (the first element), and UV (the second one) panes. */
-    int                     mStrides[2];
+ protected:
+  /* Strides for Y (the first element), and UV (the second one) panes. */
+  int mStrides[2];
 
-private:
-    // library handle to dlopen
-    static void* mDl;
-    JpegStub mStub;
+ private:
+  // library handle to dlopen
+  static void* mDl;
+  JpegStub mStub;
 };
 
 }; /* namespace android */
 
-#endif  /* HW_EMULATOR_CAMERA_JPEG_COMPRESSOR_H */
+#endif /* HW_EMULATOR_CAMERA_JPEG_COMPRESSOR_H */
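
Note: the mStrides[2] member above holds the Y and interleaved-chroma strides,
which compressRawImage sets to the image width for both planes. The NV21
buffer arithmetic this relies on, as a sketch (struct and helper are
illustrative):

    #include <cstddef>

    // NV21: a full-resolution Y plane, then an interleaved VU plane at half
    // vertical resolution; both planes use a stride equal to the width.
    struct Nv21Layout {
      size_t y_size;     // width * height
      size_t vu_offset;  // chroma plane starts right after Y
      size_t total;      // width * height * 3 / 2 (the 12/8 factor in the logs)
    };

    Nv21Layout nv21Layout(size_t width, size_t height) {
      Nv21Layout l;
      l.y_size = width * height;
      l.vu_offset = l.y_size;
      l.total = l.y_size * 3 / 2;
      return l;
    }
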
diff --git a/guest/hals/camera/JpegStub.cpp b/guest/hals/camera/JpegStub.cpp
index ff23d4b..f186e03 100644
--- a/guest/hals/camera/JpegStub.cpp
+++ b/guest/hals/camera/JpegStub.cpp
@@ -16,18 +16,18 @@
 
 #define LOG_NDEBUG 0
 #define LOG_TAG "EmulatedCamera_JPEGStub"
-#include <errno.h>
-#include <cutils/log.h>
-#include <libyuv.h>
 #include <YuvToJpegEncoder.h>
+#include <cutils/log.h>
+#include <errno.h>
+#include <libyuv.h>
 
 #include "ExifMetadataBuilder.h"
 #include "JpegStub.h"
 
 namespace {
-bool GenerateThumbnail(
-    const uint8_t* source_nv21, int source_width, int source_height,
-    int thumbnail_width, int thumbnail_height, SkDynamicMemoryWStream* target) {
+bool GenerateThumbnail(const uint8_t* source_nv21, int source_width,
+                       int source_height, int thumbnail_width,
+                       int thumbnail_height, SkDynamicMemoryWStream* target) {
   // We need to convert the image from Y'UV420SP to I420, which seems to be
   // the only format that LibYUV can scale.
   // These formats are similar in their memory occupancy (both use about 3/2 of
@@ -38,31 +38,24 @@
   uint8_t* temp_u = temp_y + temp_y_size;
   uint8_t* temp_v = temp_u + temp_uv_size;
 
-  libyuv::NV12ToI420(
-      source_nv21, source_width,
-      source_nv21 + temp_y_size, source_width,
-      temp_y, source_width,
-      temp_u, source_width / 2,
-      temp_v, source_width / 2,
-      source_width, source_height);
+  libyuv::NV12ToI420(source_nv21, source_width, source_nv21 + temp_y_size,
+                     source_width, temp_y, source_width, temp_u,
+                     source_width / 2, temp_v, source_width / 2, source_width,
+                     source_height);
 
   // Compute and allocate memory for thumbnail I420.
   int thumb_y_size = thumbnail_width * thumbnail_height;
   int thumb_uv_size = thumb_y_size / 4;
-  uint8_t* thumb_y = (uint8_t*)malloc(thumb_y_size + thumb_uv_size + thumb_uv_size);
+  uint8_t* thumb_y =
+      (uint8_t*)malloc(thumb_y_size + thumb_uv_size + thumb_uv_size);
   uint8_t* thumb_u = thumb_y + thumb_y_size;
   uint8_t* thumb_v = thumb_u + thumb_uv_size;
 
-  libyuv::I420Scale(
-      temp_y, source_width,
-      temp_u, source_width / 2,
-      temp_v, source_width / 2,
-      source_width, source_height,
-      thumb_y, thumbnail_width,
-      thumb_u, thumbnail_width / 2,
-      thumb_v, thumbnail_width / 2,
-      thumbnail_width, thumbnail_height,
-      libyuv::kFilterBilinear);
+  libyuv::I420Scale(temp_y, source_width, temp_u, source_width / 2, temp_v,
+                    source_width / 2, source_width, source_height, thumb_y,
+                    thumbnail_width, thumb_u, thumbnail_width / 2, thumb_v,
+                    thumbnail_width / 2, thumbnail_width, thumbnail_height,
+                    libyuv::kFilterBilinear);
 
   // Combine U and V components back to NV21 format.
   // We can re-use temp_y buffer for our needs at this point.
@@ -76,18 +69,18 @@
   memcpy(thumb_u, temp_y, thumb_uv_size * 2);
 
   // Compress image.
-  int strides[2] = { thumbnail_width, thumbnail_width };
-  int offsets[2] = { 0, thumb_y_size };
+  int strides[2] = {thumbnail_width, thumbnail_width};
+  int offsets[2] = {0, thumb_y_size};
   Yuv420SpToJpegEncoder* encoder = new Yuv420SpToJpegEncoder(strides);
 
-  bool result = encoder->encode(
-      target, thumb_y, thumbnail_width, thumbnail_height, offsets, 90);
+  bool result = encoder->encode(target, thumb_y, thumbnail_width,
+                                thumbnail_height, offsets, 90);
 
   if (!result) {
     ALOGE("%s: Thumbnail compression failed", __FUNCTION__);
   }
 
-  delete(encoder);
+  delete (encoder);
   free(thumb_y);
   free(temp_y);
 
@@ -96,99 +89,95 @@
 }  // namespace
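
Note: GenerateThumbnail above goes NV21 -> I420 -> scale -> NV21 because
libyuv scales planar I420. The downscale step in isolation, assuming libyuv
is available (buffer sizes follow the 4:2:0 rule, chroma planes being
half-size in each dimension):

    #include <libyuv.h>
    #include <cstdint>

    // src and dst are contiguous I420 buffers of size w * h * 3 / 2.
    bool scaleI420(const uint8_t* src, int sw, int sh,
                   uint8_t* dst, int dw, int dh) {
      const uint8_t* src_u = src + sw * sh;
      const uint8_t* src_v = src_u + (sw / 2) * (sh / 2);
      uint8_t* dst_u = dst + dw * dh;
      uint8_t* dst_v = dst_u + (dw / 2) * (dh / 2);
      return libyuv::I420Scale(src, sw, src_u, sw / 2, src_v, sw / 2, sw, sh,
                               dst, dw, dst_u, dw / 2, dst_v, dw / 2, dw, dh,
                               libyuv::kFilterBilinear) == 0;
    }
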
 
 extern "C" void JpegStub_init(JpegStub* stub, int* strides) {
-    stub->mInternalEncoder = (void*) new Yuv420SpToJpegEncoder(strides);
-    stub->mInternalStream = (void*)new SkDynamicMemoryWStream();
-    stub->mExifBuilder = (void*)new android::ExifMetadataBuilder();
+  stub->mInternalEncoder = (void*)new Yuv420SpToJpegEncoder(strides);
+  stub->mInternalStream = (void*)new SkDynamicMemoryWStream();
+  stub->mExifBuilder = (void*)new android::ExifMetadataBuilder();
 }
 
 extern "C" void JpegStub_cleanup(JpegStub* stub) {
-    delete((Yuv420SpToJpegEncoder*)stub->mInternalEncoder);
-    delete((SkDynamicMemoryWStream*)stub->mInternalStream);
-    delete((android::ExifMetadataBuilder*)stub->mExifBuilder);
+  delete ((Yuv420SpToJpegEncoder*)stub->mInternalEncoder);
+  delete ((SkDynamicMemoryWStream*)stub->mInternalStream);
+  delete ((android::ExifMetadataBuilder*)stub->mExifBuilder);
 }
 
-extern "C" int JpegStub_compress(JpegStub* stub, const void* image,
-                                 int quality, const ImageMetadata* meta)
-{
-    void* pY = const_cast<void*>(image);
+extern "C" int JpegStub_compress(JpegStub* stub, const void* image, int quality,
+                                 const ImageMetadata* meta) {
+  void* pY = const_cast<void*>(image);
 
-    int offsets[2];
-    offsets[0] = 0;
-    offsets[1] = meta->mWidth * meta->mHeight;
+  int offsets[2];
+  offsets[0] = 0;
+  offsets[1] = meta->mWidth * meta->mHeight;
 
-    Yuv420SpToJpegEncoder* encoder =
-        (Yuv420SpToJpegEncoder*)stub->mInternalEncoder;
-    SkDynamicMemoryWStream* stream =
-        (SkDynamicMemoryWStream*)stub->mInternalStream;
-    android::ExifMetadataBuilder* exif =
-        (android::ExifMetadataBuilder*)stub->mExifBuilder;
+  Yuv420SpToJpegEncoder* encoder =
+      (Yuv420SpToJpegEncoder*)stub->mInternalEncoder;
+  SkDynamicMemoryWStream* stream =
+      (SkDynamicMemoryWStream*)stub->mInternalStream;
+  android::ExifMetadataBuilder* exif =
+      (android::ExifMetadataBuilder*)stub->mExifBuilder;
 
-    exif->SetWidth(meta->mWidth);
-    exif->SetHeight(meta->mHeight);
-    exif->SetDateTime(time(NULL));
-    if (meta->mLensFocalLength != -1)
-      exif->SetLensFocalLength(meta->mLensFocalLength);
-    if (meta->mGpsTimestamp != -1) {
-      exif->SetGpsLatitude(meta->mGpsLatitude);
-      exif->SetGpsLongitude(meta->mGpsLongitude);
-      exif->SetGpsAltitude(meta->mGpsAltitude);
-      exif->SetGpsDateTime(meta->mGpsTimestamp);
-      exif->SetGpsProcessingMethod(meta->mGpsProcessingMethod);
-    }
+  exif->SetWidth(meta->mWidth);
+  exif->SetHeight(meta->mHeight);
+  exif->SetDateTime(time(NULL));
+  if (meta->mLensFocalLength != -1)
+    exif->SetLensFocalLength(meta->mLensFocalLength);
+  if (meta->mGpsTimestamp != -1) {
+    exif->SetGpsLatitude(meta->mGpsLatitude);
+    exif->SetGpsLongitude(meta->mGpsLongitude);
+    exif->SetGpsAltitude(meta->mGpsAltitude);
+    exif->SetGpsDateTime(meta->mGpsTimestamp);
+    exif->SetGpsProcessingMethod(meta->mGpsProcessingMethod);
+  }
 
-    ALOGV("%s: Requested thumbnail size: %dx%d",
-          __FUNCTION__, meta->mThumbnailWidth, meta->mThumbnailHeight);
+  ALOGV("%s: Requested thumbnail size: %dx%d", __FUNCTION__,
+        meta->mThumbnailWidth, meta->mThumbnailHeight);
 
-    // Thumbnail requested?
-    if (meta->mThumbnailWidth > 0 && meta->mThumbnailHeight > 0) {
-      exif->SetThumbnailWidth(meta->mThumbnailWidth);
-      exif->SetThumbnailHeight(meta->mThumbnailHeight);
-      SkDynamicMemoryWStream* thumbnail = new SkDynamicMemoryWStream();
-      GenerateThumbnail(
-          (uint8_t*)pY,
-          meta->mWidth, meta->mHeight,
-          meta->mThumbnailWidth, meta->mThumbnailHeight,
-          thumbnail);
+  // Thumbnail requested?
+  if (meta->mThumbnailWidth > 0 && meta->mThumbnailHeight > 0) {
+    exif->SetThumbnailWidth(meta->mThumbnailWidth);
+    exif->SetThumbnailHeight(meta->mThumbnailHeight);
+    SkDynamicMemoryWStream* thumbnail = new SkDynamicMemoryWStream();
+    GenerateThumbnail((uint8_t*)pY, meta->mWidth, meta->mHeight,
+                      meta->mThumbnailWidth, meta->mThumbnailHeight, thumbnail);
 
-      int thumbnail_size = thumbnail->bytesWritten();
-      void* thumbnail_data = malloc(thumbnail_size);
-      thumbnail->read(thumbnail_data, 0, thumbnail_size);
-      // Pass ownership to EXIF builder.
-      exif->SetThumbnail(thumbnail_data, thumbnail_size);
-      delete thumbnail;
-    }
+    int thumbnail_size = thumbnail->bytesWritten();
+    void* thumbnail_data = malloc(thumbnail_size);
+    thumbnail->read(thumbnail_data, 0, thumbnail_size);
+    // Pass ownership to EXIF builder.
+    exif->SetThumbnail(thumbnail_data, thumbnail_size);
+    delete thumbnail;
+  }
 
-    exif->Build();
+  exif->Build();
 
-    if (encoder->encode(stream, pY, meta->mWidth, meta->mHeight,
-                        offsets, quality)) {
-        ALOGI("%s: Compressed JPEG: %d[%dx%d] -> %zu bytes",
-              __FUNCTION__, (meta->mWidth * meta->mHeight * 12) / 8,
-              meta->mWidth, meta->mHeight, stream->bytesWritten());
-        return 0;
-    } else {
-        ALOGE("%s: JPEG compression failed", __FUNCTION__);
-        return errno ? errno: EINVAL;
-    }
+  if (encoder->encode(stream, pY, meta->mWidth, meta->mHeight, offsets,
+                      quality)) {
+    ALOGI("%s: Compressed JPEG: %d[%dx%d] -> %zu bytes", __FUNCTION__,
+          (meta->mWidth * meta->mHeight * 12) / 8, meta->mWidth, meta->mHeight,
+          stream->bytesWritten());
+    return 0;
+  } else {
+    ALOGE("%s: JPEG compression failed", __FUNCTION__);
+    return errno ? errno : EINVAL;
+  }
 }
 
 extern "C" void JpegStub_getCompressedImage(JpegStub* stub, void* buff) {
-    SkDynamicMemoryWStream* stream =
-        (SkDynamicMemoryWStream*)stub->mInternalStream;
-    android::ExifMetadataBuilder* exif =
-        (android::ExifMetadataBuilder*)stub->mExifBuilder;
-    char* target = (char*)buff;
-    memcpy(buff, exif->Buffer().data(), exif->Buffer().size());
-    target += exif->Buffer().size();
+  SkDynamicMemoryWStream* stream =
+      (SkDynamicMemoryWStream*)stub->mInternalStream;
+  android::ExifMetadataBuilder* exif =
+      (android::ExifMetadataBuilder*)stub->mExifBuilder;
+  char* target = (char*)buff;
+  memcpy(buff, exif->Buffer().data(), exif->Buffer().size());
+  target += exif->Buffer().size();
 
-    // Skip 0xFFD8 marker. This marker has already been included in Metadata.
-    stream->read(target, 2, stream->bytesWritten() - 2);
+  // Skip the 0xFFD8 (SOI) marker; it is already included in the EXIF data.
+  stream->read(target, 2, stream->bytesWritten() - 2);
 }
 
 extern "C" size_t JpegStub_getCompressedSize(JpegStub* stub) {
-    SkDynamicMemoryWStream* stream =
-        (SkDynamicMemoryWStream*)stub->mInternalStream;
-    android::ExifMetadataBuilder* exif =
-        (android::ExifMetadataBuilder*)stub->mExifBuilder;
-    return stream->bytesWritten() + exif->Buffer().size() - 2;
+  SkDynamicMemoryWStream* stream =
+      (SkDynamicMemoryWStream*)stub->mInternalStream;
+  android::ExifMetadataBuilder* exif =
+      (android::ExifMetadataBuilder*)stub->mExifBuilder;
+  return stream->bytesWritten() + exif->Buffer().size() - 2;
 }
diff --git a/guest/hals/camera/JpegStub.h b/guest/hals/camera/JpegStub.h
index 10e5bf8..639f906 100644
--- a/guest/hals/camera/JpegStub.h
+++ b/guest/hals/camera/JpegStub.h
@@ -22,9 +22,9 @@
 extern "C" {
 
 struct JpegStub {
-    void* mInternalEncoder;
-    void* mInternalStream;
-    void* mExifBuilder;
+  void* mInternalEncoder;
+  void* mInternalStream;
+  void* mExifBuilder;
 };
 
 void JpegStub_init(JpegStub* stub, int* strides);
@@ -33,6 +33,5 @@
                       const ImageMetadata* metadata);
 void JpegStub_getCompressedImage(JpegStub* stub, void* buff);
 size_t JpegStub_getCompressedSize(JpegStub* stub);
-
 };
-#endif // JPEGSTUB_H_
+#endif  // JPEGSTUB_H_
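
Note: JpegStub keeps its members as void* and exposes plain C functions so
the HAL can call into the separately loaded encoder library without sharing
a C++ ABI. The pattern, sketched with a made-up "widget" library:

    extern "C" {
    struct WidgetStub {
      void* mImpl;  // opaque pointer to a C++ object behind the C boundary
    };
    void WidgetStub_init(WidgetStub* stub);
    void WidgetStub_cleanup(WidgetStub* stub);
    }

    // Inside the loaded library, the opaque pointer is cast back:
    class Widget {};

    extern "C" void WidgetStub_init(WidgetStub* stub) {
      stub->mImpl = (void*)new Widget();
    }

    extern "C" void WidgetStub_cleanup(WidgetStub* stub) {
      delete (Widget*)stub->mImpl;
    }
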
diff --git a/guest/hals/camera/PreviewWindow.cpp b/guest/hals/camera/PreviewWindow.cpp
index 281a3fa..d4eb59b 100644
--- a/guest/hals/camera/PreviewWindow.cpp
+++ b/guest/hals/camera/PreviewWindow.cpp
@@ -21,9 +21,9 @@
 
 #define LOG_NDEBUG 0
 #define LOG_TAG "EmulatedCamera_Preview"
+#include "PreviewWindow.h"
 #include <cutils/log.h>
 #include "EmulatedCameraDevice.h"
-#include "PreviewWindow.h"
 #include "GrallocModule.h"
 
 namespace android {
@@ -33,183 +33,170 @@
       mLastPreviewed(0),
       mPreviewFrameWidth(0),
       mPreviewFrameHeight(0),
-      mPreviewEnabled(false)
-{
-}
+      mPreviewEnabled(false) {}
 
-PreviewWindow::~PreviewWindow()
-{
-}
+PreviewWindow::~PreviewWindow() {}
 
 /****************************************************************************
  * Camera API
  ***************************************************************************/
 
 status_t PreviewWindow::setPreviewWindow(struct preview_stream_ops* window,
-                                         int preview_fps)
-{
-    ALOGV("%s: current: %p -> new: %p", __FUNCTION__, mPreviewWindow, window);
+                                         int preview_fps) {
+  ALOGV("%s: current: %p -> new: %p", __FUNCTION__, mPreviewWindow, window);
 
-    status_t res = NO_ERROR;
-    Mutex::Autolock locker(&mObjectLock);
+  status_t res = NO_ERROR;
+  Mutex::Autolock locker(&mObjectLock);
 
-    /* Reset preview info. */
-    mPreviewFrameWidth = mPreviewFrameHeight = 0;
-    mPreviewAfter = 0;
-    mLastPreviewed = 0;
+  /* Reset preview info. */
+  mPreviewFrameWidth = mPreviewFrameHeight = 0;
+  mPreviewAfter = 0;
+  mLastPreviewed = 0;
 
-    if (window != NULL) {
-        /* The CPU will write each frame to the preview window buffer.
-         * Note that we delay setting preview window buffer geometry until
-         * frames start to come in. */
-        res = window->set_usage(window, GRALLOC_USAGE_SW_WRITE_OFTEN);
-        if (res == NO_ERROR) {
-            /* Set preview frequency. */
-            mPreviewAfter = 1000000 / preview_fps;
-        } else {
-            window = NULL;
-            res = -res; // set_usage returns a negative errno.
-            ALOGE("%s: Error setting preview window usage %d -> %s",
-                 __FUNCTION__, res, strerror(res));
-        }
+  if (window != NULL) {
+    /* The CPU will write each frame to the preview window buffer.
+     * Note that we delay setting preview window buffer geometry until
+     * frames start to come in. */
+    res = window->set_usage(window, GRALLOC_USAGE_SW_WRITE_OFTEN);
+    if (res == NO_ERROR) {
+      /* Set preview frequency. */
+      mPreviewAfter = 1000000 / preview_fps;
+    } else {
+      window = NULL;
+      res = -res;  // set_usage returns a negative errno.
+      ALOGE("%s: Error setting preview window usage %d -> %s", __FUNCTION__,
+            res, strerror(res));
     }
-    mPreviewWindow = window;
+  }
+  mPreviewWindow = window;
 
-    return res;
+  return res;
 }
 
-status_t PreviewWindow::startPreview()
-{
-    ALOGV("%s", __FUNCTION__);
+status_t PreviewWindow::startPreview() {
+  ALOGV("%s", __FUNCTION__);
 
-    Mutex::Autolock locker(&mObjectLock);
-    mPreviewEnabled = true;
+  Mutex::Autolock locker(&mObjectLock);
+  mPreviewEnabled = true;
 
-    return NO_ERROR;
+  return NO_ERROR;
 }
 
-void PreviewWindow::stopPreview()
-{
-    ALOGV("%s", __FUNCTION__);
+void PreviewWindow::stopPreview() {
+  ALOGV("%s", __FUNCTION__);
 
-    Mutex::Autolock locker(&mObjectLock);
-    mPreviewEnabled = false;
+  Mutex::Autolock locker(&mObjectLock);
+  mPreviewEnabled = false;
 }
 
 /****************************************************************************
  * Public API
  ***************************************************************************/
 
-void PreviewWindow::onNextFrameAvailable(const void* frame,
-                                         nsecs_t timestamp,
-                                         EmulatedCameraDevice* camera_dev)
-{
-    int res;
-    Mutex::Autolock locker(&mObjectLock);
+void PreviewWindow::onNextFrameAvailable(const void* frame, nsecs_t timestamp,
+                                         EmulatedCameraDevice* camera_dev) {
+  int res;
+  Mutex::Autolock locker(&mObjectLock);
 
-    if (!isPreviewEnabled() || mPreviewWindow == NULL || !isPreviewTime()) {
-        return;
-    }
+  if (!isPreviewEnabled() || mPreviewWindow == NULL || !isPreviewTime()) {
+    return;
+  }
 
-    /* Make sure that preview window dimensions are OK with the camera device */
-    if (adjustPreviewDimensions(camera_dev)) {
-        /* Need to set / adjust buffer geometry for the preview window.
-         * Note that in the emulator preview window uses only RGB for pixel
-         * formats. */
-        ALOGV("%s: Adjusting preview windows %p geometry to %dx%d",
-             __FUNCTION__, mPreviewWindow, mPreviewFrameWidth,
-             mPreviewFrameHeight);
-        res = mPreviewWindow->set_buffers_geometry(mPreviewWindow,
-                                                   mPreviewFrameWidth,
-                                                   mPreviewFrameHeight,
-                                                   HAL_PIXEL_FORMAT_RGBA_8888);
-        if (res != NO_ERROR) {
-            ALOGE("%s: Error in set_buffers_geometry %d -> %s",
-                 __FUNCTION__, -res, strerror(-res));
-            return;
-        }
-    }
-
-    /*
-     * Push new frame to the preview window.
-     */
-
-    /* Dequeue preview window buffer for the frame. */
-    buffer_handle_t* buffer = NULL;
-    int stride = 0;
-    res = mPreviewWindow->dequeue_buffer(mPreviewWindow, &buffer, &stride);
-    if (res != NO_ERROR || buffer == NULL) {
-        ALOGE("%s: Unable to dequeue preview window buffer: %d -> %s",
-            __FUNCTION__, -res, strerror(-res));
-        return;
-    }
-
-    /* Let the preview window to lock the buffer. */
-    res = mPreviewWindow->lock_buffer(mPreviewWindow, buffer);
+  /* Keep the preview window dimensions in sync with the camera device. */
+  if (adjustPreviewDimensions(camera_dev)) {
+    /* Need to set / adjust buffer geometry for the preview window.
+     * Note that in the emulator the preview window uses only RGB pixel
+     * formats. */
+    ALOGV("%s: Adjusting preview windows %p geometry to %dx%d", __FUNCTION__,
+          mPreviewWindow, mPreviewFrameWidth, mPreviewFrameHeight);
+    res = mPreviewWindow->set_buffers_geometry(
+        mPreviewWindow, mPreviewFrameWidth, mPreviewFrameHeight,
+        HAL_PIXEL_FORMAT_RGBA_8888);
     if (res != NO_ERROR) {
-        ALOGE("%s: Unable to lock preview window buffer: %d -> %s",
-             __FUNCTION__, -res, strerror(-res));
-        mPreviewWindow->cancel_buffer(mPreviewWindow, buffer);
-        return;
+      ALOGE("%s: Error in set_buffers_geometry %d -> %s", __FUNCTION__, -res,
+            strerror(-res));
+      return;
     }
+  }
 
-    /* Now let the graphics framework to lock the buffer, and provide
-     * us with the framebuffer data address. */
-    void* img = NULL;
-    res = GrallocModule::getInstance().lock(
-        *buffer, GRALLOC_USAGE_SW_WRITE_OFTEN,
-        0, 0, mPreviewFrameWidth, mPreviewFrameHeight, &img);
-    if (res != NO_ERROR) {
-        ALOGE("%s: gralloc.lock failure: %d -> %s",
-             __FUNCTION__, res, strerror(res));
-        mPreviewWindow->cancel_buffer(mPreviewWindow, buffer);
-        return;
-    }
+  /*
+   * Push new frame to the preview window.
+   */
 
-    /* Frames come in in YV12/NV12/NV21 format. Since preview window doesn't
-     * supports those formats, we need to obtain the frame in RGB565. */
-    res = camera_dev->getCurrentPreviewFrame(img);
-    if (res == NO_ERROR) {
-        /* Show it. */
-        mPreviewWindow->set_timestamp(mPreviewWindow, timestamp);
-        mPreviewWindow->enqueue_buffer(mPreviewWindow, buffer);
-    } else {
-        ALOGE("%s: Unable to obtain preview frame: %d", __FUNCTION__, res);
-        mPreviewWindow->cancel_buffer(mPreviewWindow, buffer);
-    }
-    GrallocModule::getInstance().unlock(*buffer);
+  /* Dequeue preview window buffer for the frame. */
+  buffer_handle_t* buffer = NULL;
+  int stride = 0;
+  res = mPreviewWindow->dequeue_buffer(mPreviewWindow, &buffer, &stride);
+  if (res != NO_ERROR || buffer == NULL) {
+    ALOGE("%s: Unable to dequeue preview window buffer: %d -> %s", __FUNCTION__,
+          -res, strerror(-res));
+    return;
+  }
+
+  /* Let the preview window lock the buffer. */
+  res = mPreviewWindow->lock_buffer(mPreviewWindow, buffer);
+  if (res != NO_ERROR) {
+    ALOGE("%s: Unable to lock preview window buffer: %d -> %s", __FUNCTION__,
+          -res, strerror(-res));
+    mPreviewWindow->cancel_buffer(mPreviewWindow, buffer);
+    return;
+  }
+
+  /* Now let the graphics framework lock the buffer, and provide
+   * us with the framebuffer data address. */
+  void* img = NULL;
+  res = GrallocModule::getInstance().lock(*buffer, GRALLOC_USAGE_SW_WRITE_OFTEN,
+                                          0, 0, mPreviewFrameWidth,
+                                          mPreviewFrameHeight, &img);
+  if (res != NO_ERROR) {
+    ALOGE("%s: gralloc.lock failure: %d -> %s", __FUNCTION__, res,
+          strerror(res));
+    mPreviewWindow->cancel_buffer(mPreviewWindow, buffer);
+    return;
+  }
+
+  /* Frames come in YV12/NV12/NV21 format. Since the preview window doesn't
+   * support those formats, we need to obtain the frame converted to RGB. */
+  res = camera_dev->getCurrentPreviewFrame(img);
+  if (res == NO_ERROR) {
+    /* Show it. */
+    mPreviewWindow->set_timestamp(mPreviewWindow, timestamp);
+    mPreviewWindow->enqueue_buffer(mPreviewWindow, buffer);
+  } else {
+    ALOGE("%s: Unable to obtain preview frame: %d", __FUNCTION__, res);
+    mPreviewWindow->cancel_buffer(mPreviewWindow, buffer);
+  }
+  GrallocModule::getInstance().unlock(*buffer);
 }
 
 /***************************************************************************
  * Private API
  **************************************************************************/
 
-bool PreviewWindow::adjustPreviewDimensions(EmulatedCameraDevice* camera_dev)
-{
-    /* Match the cached frame dimensions against the actual ones. */
-    if (mPreviewFrameWidth == camera_dev->getFrameWidth() &&
-        mPreviewFrameHeight == camera_dev->getFrameHeight()) {
-        /* They match. */
-        return false;
-    }
+bool PreviewWindow::adjustPreviewDimensions(EmulatedCameraDevice* camera_dev) {
+  /* Match the cached frame dimensions against the actual ones. */
+  if (mPreviewFrameWidth == camera_dev->getFrameWidth() &&
+      mPreviewFrameHeight == camera_dev->getFrameHeight()) {
+    /* They match. */
+    return false;
+  }
 
-    /* They don't match: adjust the cache. */
-    mPreviewFrameWidth = camera_dev->getFrameWidth();
-    mPreviewFrameHeight = camera_dev->getFrameHeight();
+  /* They don't match: adjust the cache. */
+  mPreviewFrameWidth = camera_dev->getFrameWidth();
+  mPreviewFrameHeight = camera_dev->getFrameHeight();
 
-    return true;
+  return true;
 }
 
-bool PreviewWindow::isPreviewTime()
-{
-    timeval cur_time;
-    gettimeofday(&cur_time, NULL);
-    const uint64_t cur_mks = cur_time.tv_sec * 1000000LL + cur_time.tv_usec;
-    if ((cur_mks - mLastPreviewed) >= mPreviewAfter) {
-        mLastPreviewed = cur_mks;
-        return true;
-    }
-    return false;
+bool PreviewWindow::isPreviewTime() {
+  timeval cur_time;
+  gettimeofday(&cur_time, NULL);
+  const uint64_t cur_mks = cur_time.tv_sec * 1000000LL + cur_time.tv_usec;
+  if ((cur_mks - mLastPreviewed) >= mPreviewAfter) {
+    mLastPreviewed = cur_mks;
+    return true;
+  }
+  return false;
 }
 
 }; /* namespace android */
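
Note: isPreviewTime above implements a simple frame throttle: setPreviewWindow
stores 1000000 / preview_fps microseconds in mPreviewAfter, and frames
arriving sooner than that since the last displayed one are dropped. The
scheme in isolation, as a sketch:

    #include <stdint.h>
    #include <sys/time.h>

    struct FrameThrottle {
      uint64_t last_us;
      uint64_t min_interval_us;

      explicit FrameThrottle(int fps)
          : last_us(0), min_interval_us(1000000 / fps) {}

      // Returns true if enough time has passed to display another frame.
      bool shouldDisplay() {
        timeval now;
        gettimeofday(&now, NULL);
        const uint64_t now_us = now.tv_sec * 1000000LL + now.tv_usec;
        if (now_us - last_us >= min_interval_us) {
          last_us = now_us;
          return true;
        }
        return false;
      }
    };
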
diff --git a/guest/hals/camera/PreviewWindow.h b/guest/hals/camera/PreviewWindow.h
index d037c95..e1e3b4c 100644
--- a/guest/hals/camera/PreviewWindow.h
+++ b/guest/hals/camera/PreviewWindow.h
@@ -33,133 +33,128 @@
  * relevant camera API callbacks.
  */
 class PreviewWindow {
-public:
-    /* Constructs PreviewWindow instance. */
-    PreviewWindow();
+ public:
+  /* Constructs PreviewWindow instance. */
+  PreviewWindow();
 
-    /* Destructs PreviewWindow instance. */
-    ~PreviewWindow();
+  /* Destructs PreviewWindow instance. */
+  ~PreviewWindow();
 
-    /***************************************************************************
-     * Camera API
-     **************************************************************************/
+  /***************************************************************************
+   * Camera API
+   **************************************************************************/
 
-public:
-    /* Actual handler for camera_device_ops_t::set_preview_window callback.
-     * This method is called by the containing emulated camera object when it is
-     * handing the camera_device_ops_t::set_preview_window callback.
-     * Param:
-     *  window - Preview window to set. This parameter might be NULL, which
-     *      indicates preview window reset.
-     *  preview_fps - Preview's frame frequency. This parameter determins when
-     *      a frame received via onNextFrameAvailable call will be pushed to
-     *      the preview window. If 'window' parameter passed to this method is
-     *      NULL, this parameter is ignored.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status.
-     */
-    status_t setPreviewWindow(struct preview_stream_ops* window,
-                              int preview_fps);
+ public:
+  /* Actual handler for camera_device_ops_t::set_preview_window callback.
+   * This method is called by the containing emulated camera object when it is
+   * handling the camera_device_ops_t::set_preview_window callback.
+   * Param:
+   *  window - Preview window to set. This parameter might be NULL, which
+   *      indicates preview window reset.
+   *  preview_fps - Preview's frame frequency. This parameter determines when
+   *      a frame received via an onNextFrameAvailable call will be pushed to
+   *      the preview window. If the 'window' parameter passed to this method
+   *      is NULL, this parameter is ignored.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status.
+   */
+  status_t setPreviewWindow(struct preview_stream_ops* window, int preview_fps);
 
-    /* Starts the preview.
-     * This method is called by the containing emulated camera object when it is
-     * handing the camera_device_ops_t::start_preview callback.
-     */
-    status_t startPreview();
+  /* Starts the preview.
+   * This method is called by the containing emulated camera object when it is
+   * handling the camera_device_ops_t::start_preview callback.
+   */
+  status_t startPreview();
 
-    /* Stops the preview.
-     * This method is called by the containing emulated camera object when it is
-     * handing the camera_device_ops_t::start_preview callback.
-     */
-    void stopPreview();
+  /* Stops the preview.
+   * This method is called by the containing emulated camera object when it is
+   * handling the camera_device_ops_t::stop_preview callback.
+   */
+  void stopPreview();
 
-    /* Checks if preview is enabled. */
-    inline bool isPreviewEnabled()
-    {
-        return mPreviewEnabled;
-    }
+  /* Checks if preview is enabled. */
+  inline bool isPreviewEnabled() { return mPreviewEnabled; }
 
-    /****************************************************************************
-     * Public API
-     ***************************************************************************/
+  /****************************************************************************
+   * Public API
+   ***************************************************************************/
 
-public:
-    /* Next frame is available in the camera device.
-     * This is a notification callback that is invoked by the camera device when
-     * a new frame is available.
-     * Note that most likely this method is called in context of a worker thread
-     * that camera device has created for frame capturing.
-     * Param:
-     *  frame - Captured frame, or NULL if camera device didn't pull the frame
-     *      yet. If NULL is passed in this parameter use GetCurrentFrame method
-     *      of the camera device class to obtain the next frame. Also note that
-     *      the size of the frame that is passed here (as well as the frame
-     *      returned from the GetCurrentFrame method) is defined by the current
-     *      frame settings (width + height + pixel format) for the camera device.
-     * timestamp - Frame's timestamp.
-     * camera_dev - Camera device instance that delivered the frame.
-     */
-    void onNextFrameAvailable(const void* frame,
-                              nsecs_t timestamp,
-                              EmulatedCameraDevice* camera_dev);
+ public:
+  /* Next frame is available in the camera device.
+   * This is a notification callback that is invoked by the camera device when
+   * a new frame is available.
+   * Note that this method is most likely called in the context of a worker
+   * thread that the camera device created for frame capturing.
+   * Param:
+   *  frame - Captured frame, or NULL if the camera device didn't pull the
+   *      frame yet. If NULL is passed, use the GetCurrentFrame method of the
+   *      camera device class to obtain the next frame. Also note that
+   *      the size of the frame that is passed here (as well as the frame
+   *      returned from the GetCurrentFrame method) is defined by the current
+   *      frame settings (width + height + pixel format) for the camera device.
+   *  timestamp - Frame's timestamp.
+   *  camera_dev - Camera device instance that delivered the frame.
+   */
+  void onNextFrameAvailable(const void* frame, nsecs_t timestamp,
+                            EmulatedCameraDevice* camera_dev);
 
-    /***************************************************************************
-     * Private API
-     **************************************************************************/
+  /***************************************************************************
+   * Private API
+   **************************************************************************/
 
-protected:
-    /* Adjusts cached dimensions of the preview window frame according to the
-     * frame dimensions used by the camera device.
-     *
-     * When preview is started, it's not known (hard to define) what are going
-     * to be the dimensions of the frames that are going to be displayed. Plus,
-     * it might be possible, that such dimensions can be changed on the fly. So,
-     * in order to be always in sync with frame dimensions, this method is
-     * called for each frame passed to onNextFrameAvailable method, in order to
-     * properly adjust frame dimensions, used by the preview window.
-     * Note that this method must be called while object is locked.
-     * Param:
-     *  camera_dev - Camera device, prpviding frames displayed in the preview
-     *      window.
-     * Return:
-     *  true if cached dimensions have been adjusted, or false if cached
-     *  dimensions match device's frame dimensions.
-     */
-    bool adjustPreviewDimensions(EmulatedCameraDevice* camera_dev);
+ protected:
+  /* Adjusts cached dimensions of the preview window frame according to the
+   * frame dimensions used by the camera device.
+   *
+   * When preview is started, the dimensions of the frames that are going to
+   * be displayed are not known in advance. Moreover, those dimensions may
+   * change on the fly. So, in order to stay in sync with the frame
+   * dimensions, this method is called for each frame passed to the
+   * onNextFrameAvailable method, and properly adjusts the frame dimensions
+   * used by the preview window.
+   * Note that this method must be called while the object is locked.
+   * Param:
+   *  camera_dev - Camera device providing frames displayed in the preview
+   *      window.
+   * Return:
+   *  true if cached dimensions have been adjusted, or false if cached
+   *  dimensions match device's frame dimensions.
+   */
+  bool adjustPreviewDimensions(EmulatedCameraDevice* camera_dev);
 
-    /* Checks if it's the time to push new frame to the preview window.
-     * Note that this method must be called while object is locked. */
-    bool isPreviewTime();
+  /* Checks if it's time to push a new frame to the preview window.
+   * Note that this method must be called while the object is locked. */
+  bool isPreviewTime();
 
-    /***************************************************************************
-     * Data members
-     **************************************************************************/
+  /***************************************************************************
+   * Data members
+   **************************************************************************/
 
-protected:
-    /* Locks this instance for data changes. */
-    Mutex                           mObjectLock;
+ protected:
+  /* Locks this instance for data changes. */
+  Mutex mObjectLock;
 
-    /* Preview window instance. */
-    preview_stream_ops*             mPreviewWindow;
+  /* Preview window instance. */
+  preview_stream_ops* mPreviewWindow;
 
-    /* Timestamp (abs. microseconds) when last frame has been pushed to the
-     * preview window. */
-    uint64_t                        mLastPreviewed;
+  /* Timestamp (abs. microseconds) when last frame has been pushed to the
+   * preview window. */
+  uint64_t mLastPreviewed;
 
-    /* Preview frequency in microseconds. */
-    uint32_t                        mPreviewAfter;
+  /* Preview frequency in microseconds. */
+  uint32_t mPreviewAfter;
 
-    /*
-     * Cached preview window frame dimensions.
-     */
+  /*
+   * Cached preview window frame dimensions.
+   */
 
-    int                             mPreviewFrameWidth;
-    int                             mPreviewFrameHeight;
+  int mPreviewFrameWidth;
+  int mPreviewFrameHeight;
 
-    /* Preview status. */
-    bool                            mPreviewEnabled;
+  /* Preview status. */
+  bool mPreviewEnabled;
 };
 
 }; /* namespace android */
 
-#endif  /* HW_EMULATOR_CAMERA_PREVIEW_WINDOW_H */
+#endif /* HW_EMULATOR_CAMERA_PREVIEW_WINDOW_H */
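
Note: onNextFrameAvailable in PreviewWindow.cpp follows the preview_stream_ops
buffer cycle: dequeue, lock, fill, then enqueue on success or cancel on any
failure. The invariant (every dequeued buffer is returned exactly once) as a
sketch, assuming the preview_stream_ops interface from <hardware/camera.h>
used throughout this change; fill() stands in for the camera's RGB conversion:

    #include <hardware/camera.h>

    bool pushFrame(preview_stream_ops* win, int64_t timestamp,
                   bool (*fill)(buffer_handle_t* buffer)) {
      buffer_handle_t* buffer = NULL;
      int stride = 0;
      if (win->dequeue_buffer(win, &buffer, &stride) != 0 || buffer == NULL) {
        return false;  // nothing dequeued, nothing to return
      }
      if (win->lock_buffer(win, buffer) != 0) {
        win->cancel_buffer(win, buffer);  // never leak a dequeued buffer
        return false;
      }
      if (fill(buffer)) {
        win->set_timestamp(win, timestamp);
        win->enqueue_buffer(win, buffer);
        return true;
      }
      win->cancel_buffer(win, buffer);
      return false;
    }
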
diff --git a/guest/hals/camera/QemuClient.cpp b/guest/hals/camera/QemuClient.cpp
index 6c10dab..34ac015 100644
--- a/guest/hals/camera/QemuClient.cpp
+++ b/guest/hals/camera/QemuClient.cpp
@@ -21,15 +21,15 @@
 
 #define LOG_NDEBUG 0
 #define LOG_TAG "EmulatedCamera_QemuClient"
+#include "QemuClient.h"
 #include <cutils/log.h>
 #include "EmulatedCamera.h"
-#include "QemuClient.h"
 
 #define LOG_QUERIES 0
 #if LOG_QUERIES
-#define LOGQ(...)   ALOGD(__VA_ARGS__)
+#define LOGQ(...) ALOGD(__VA_ARGS__)
 #else
-#define LOGQ(...)   (void(0))
+#define LOGQ(...) (void(0))
 
 #endif  // LOG_QUERIES
 namespace android {
@@ -45,9 +45,8 @@
       mReplyData(NULL),
       mReplySize(0),
       mReplyDataSize(0),
-      mReplyStatus(0)
-{
-    *mQuery = '\0';
+      mReplyStatus(0) {
+  *mQuery = '\0';
 }
 
 QemuQuery::QemuQuery(const char* query_string)
@@ -57,9 +56,8 @@
       mReplyData(NULL),
       mReplySize(0),
       mReplyDataSize(0),
-      mReplyStatus(0)
-{
-    mQueryDeliveryStatus = QemuQuery::createQuery(query_string, NULL);
+      mReplyStatus(0) {
+  mQueryDeliveryStatus = QemuQuery::createQuery(query_string, NULL);
 }
 
 QemuQuery::QemuQuery(const char* query_name, const char* query_param)
@@ -69,122 +67,115 @@
       mReplyData(NULL),
       mReplySize(0),
       mReplyDataSize(0),
-      mReplyStatus(0)
-{
-    mQueryDeliveryStatus = QemuQuery::createQuery(query_name, query_param);
+      mReplyStatus(0) {
+  mQueryDeliveryStatus = QemuQuery::createQuery(query_name, query_param);
 }
 
-QemuQuery::~QemuQuery()
-{
-    QemuQuery::resetQuery();
+QemuQuery::~QemuQuery() { QemuQuery::resetQuery(); }
+
+status_t QemuQuery::createQuery(const char* name, const char* param) {
+  /* Reset from the previous use. */
+  resetQuery();
+
+  /* Query name cannot be NULL or an empty string. */
+  if (name == NULL || *name == '\0') {
+    ALOGE("%s: NULL or an empty string is passed as query name.", __FUNCTION__);
+    mQueryDeliveryStatus = EINVAL;
+    return EINVAL;
+  }
+
+  const size_t name_len = strlen(name);
+  const size_t param_len = (param != NULL) ? strlen(param) : 0;
+  const size_t required = strlen(name) + (param_len ? (param_len + 2) : 1);
+
+  if (required > sizeof(mQueryPrealloc)) {
+    /* Preallocated buffer was too small. Allocate a bigger query buffer. */
+    mQuery = new char[required];
+    if (mQuery == NULL) {
+      ALOGE("%s: Unable to allocate %zu bytes for query buffer", __FUNCTION__,
+            required);
+      mQueryDeliveryStatus = ENOMEM;
+      return ENOMEM;
+    }
+  }
+
+  /* At this point mQuery buffer is big enough for the query. */
+  if (param_len) {
+    sprintf(mQuery, "%s %s", name, param);
+  } else {
+    memcpy(mQuery, name, name_len + 1);
+  }
+
+  return NO_ERROR;
 }
 
-status_t QemuQuery::createQuery(const char* name, const char* param)
-{
-    /* Reset from the previous use. */
-    resetQuery();
+status_t QemuQuery::completeQuery(status_t status) {
+  /* Save query completion status. */
+  mQueryDeliveryStatus = status;
+  if (mQueryDeliveryStatus != NO_ERROR) {
+    return mQueryDeliveryStatus;
+  }
 
-    /* Query name cannot be NULL or an empty string. */
-    if (name == NULL || *name == '\0') {
-        ALOGE("%s: NULL or an empty string is passed as query name.",
-             __FUNCTION__);
-        mQueryDeliveryStatus = EINVAL;
-        return EINVAL;
-    }
+  /* Make sure the reply buffer contains at least 'ok', or 'ko'.
+   * Note that the 'ok'/'ko' prefix is always 3 characters long: in case
+   * there is more data in the reply, that data is separated from the
+   * prefix with a ':'. If there is no more data in the reply, the prefix
+   * will be zero-terminated, and the terminator will be included in the
+   * reply. */
+  if (mReplyBuffer == NULL || mReplySize < 3) {
+    ALOGE("%s: Invalid reply to the query", __FUNCTION__);
+    mQueryDeliveryStatus = EINVAL;
+    return EINVAL;
+  }
 
-    const size_t name_len = strlen(name);
-    const size_t param_len = (param != NULL) ? strlen(param) : 0;
-    const size_t required = strlen(name) + (param_len ? (param_len + 2) : 1);
-
-    if (required > sizeof(mQueryPrealloc)) {
-        /* Preallocated buffer was too small. Allocate a bigger query buffer. */
-        mQuery = new char[required];
-        if (mQuery == NULL) {
-            ALOGE("%s: Unable to allocate %zu bytes for query buffer",
-                 __FUNCTION__, required);
-            mQueryDeliveryStatus = ENOMEM;
-            return ENOMEM;
-        }
-    }
-
-    /* At this point mQuery buffer is big enough for the query. */
-    if (param_len) {
-        sprintf(mQuery, "%s %s", name, param);
-    } else {
-        memcpy(mQuery, name, name_len + 1);
-    }
-
-    return NO_ERROR;
-}
-
-status_t QemuQuery::completeQuery(status_t status)
-{
-    /* Save query completion status. */
-    mQueryDeliveryStatus = status;
-    if (mQueryDeliveryStatus != NO_ERROR) {
-        return mQueryDeliveryStatus;
-    }
-
-    /* Make sure reply buffer contains at least 'ok', or 'ko'.
-     * Note that 'ok', or 'ko' prefixes are always 3 characters long: in case
-     * there are more data in the reply, that data will be separated from 'ok'/'ko'
-     * with a ':'. If there is no more data in the reply, the prefix will be
-     * zero-terminated, and the terminator will be inculded in the reply. */
-    if (mReplyBuffer == NULL || mReplySize < 3) {
-        ALOGE("%s: Invalid reply to the query", __FUNCTION__);
-        mQueryDeliveryStatus = EINVAL;
-        return EINVAL;
-    }
-
-    /* Lets see the reply status. */
-    if (!memcmp(mReplyBuffer, "ok", 2)) {
-        mReplyStatus = 1;
-    } else if (!memcmp(mReplyBuffer, "ko", 2)) {
-        mReplyStatus = 0;
-    } else {
-        ALOGE("%s: Invalid query reply: '%s'", __FUNCTION__, mReplyBuffer);
-        mQueryDeliveryStatus = EINVAL;
-        return EINVAL;
-    }
-
-    /* Lets see if there are reply data that follow. */
-    if (mReplySize > 3) {
-        /* There are extra data. Make sure they are separated from the status
-         * with a ':' */
-        if (mReplyBuffer[2] != ':') {
-            ALOGE("%s: Invalid query reply: '%s'", __FUNCTION__, mReplyBuffer);
-            mQueryDeliveryStatus = EINVAL;
-            return EINVAL;
-        }
-        mReplyData = mReplyBuffer + 3;
-        mReplyDataSize = mReplySize - 3;
-    } else {
-        /* Make sure reply buffer containing just 'ok'/'ko' ends with
-         * zero-terminator. */
-        if (mReplyBuffer[2] != '\0') {
-            ALOGE("%s: Invalid query reply: '%s'", __FUNCTION__, mReplyBuffer);
-            mQueryDeliveryStatus = EINVAL;
-            return EINVAL;
-        }
-    }
-
-    return NO_ERROR;
-}
-
-void QemuQuery::resetQuery()
-{
-    if (mQuery != NULL && mQuery != mQueryPrealloc) {
-        delete[] mQuery;
-    }
-    mQuery = mQueryPrealloc;
-    mQueryDeliveryStatus = NO_ERROR;
-    if (mReplyBuffer != NULL) {
-        free(mReplyBuffer);
-        mReplyBuffer = NULL;
-    }
-    mReplyData = NULL;
-    mReplySize = mReplyDataSize = 0;
+  /* Let's check the reply status. */
+  if (!memcmp(mReplyBuffer, "ok", 2)) {
+    mReplyStatus = 1;
+  } else if (!memcmp(mReplyBuffer, "ko", 2)) {
     mReplyStatus = 0;
+  } else {
+    ALOGE("%s: Invalid query reply: '%s'", __FUNCTION__, mReplyBuffer);
+    mQueryDeliveryStatus = EINVAL;
+    return EINVAL;
+  }
+
+  /* Lets see if there are reply data that follow. */
+  if (mReplySize > 3) {
+    /* There are extra data. Make sure they are separated from the status
+     * with a ':' */
+    if (mReplyBuffer[2] != ':') {
+      ALOGE("%s: Invalid query reply: '%s'", __FUNCTION__, mReplyBuffer);
+      mQueryDeliveryStatus = EINVAL;
+      return EINVAL;
+    }
+    mReplyData = mReplyBuffer + 3;
+    mReplyDataSize = mReplySize - 3;
+  } else {
+    /* Make sure reply buffer containing just 'ok'/'ko' ends with
+     * zero-terminator. */
+    if (mReplyBuffer[2] != '\0') {
+      ALOGE("%s: Invalid query reply: '%s'", __FUNCTION__, mReplyBuffer);
+      mQueryDeliveryStatus = EINVAL;
+      return EINVAL;
+    }
+  }
+
+  return NO_ERROR;
+}
+
+void QemuQuery::resetQuery() {
+  if (mQuery != NULL && mQuery != mQueryPrealloc) {
+    delete[] mQuery;
+  }
+  mQuery = mQueryPrealloc;
+  mQueryDeliveryStatus = NO_ERROR;
+  if (mReplyBuffer != NULL) {
+    free(mReplyBuffer);
+    mReplyBuffer = NULL;
+  }
+  mReplyData = NULL;
+  mReplySize = mReplyDataSize = 0;
+  mReplyStatus = 0;
 }
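
Note: completeQuery above enforces the reply framing of the qemu camera
service: a 3-byte 'ok'/'ko' prefix, followed either by a '\0' (no payload) or
by a ':' and the payload bytes; the 8-hex-digit length header is consumed
earlier, in receiveMessage. The framing check as a stand-alone sketch:

    #include <cstddef>
    #include <cstring>

    // Returns 1 for "ok", 0 for "ko", -1 for a malformed reply. On success,
    // *data/*size describe the optional payload after the status prefix.
    int parseReply(const char* buf, size_t len,
                   const char** data, size_t* size) {
      *data = NULL;
      *size = 0;
      if (buf == NULL || len < 3) return -1;
      int ok;
      if (memcmp(buf, "ok", 2) == 0) ok = 1;
      else if (memcmp(buf, "ko", 2) == 0) ok = 0;
      else return -1;
      if (len > 3) {
        if (buf[2] != ':') return -1;  // payload must be ':'-separated
        *data = buf + 3;
        *size = len - 3;
      } else if (buf[2] != '\0') {
        return -1;  // bare status must be zero-terminated
      }
      return ok;
    }
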
 
 /****************************************************************************
@@ -192,176 +183,167 @@
  ***************************************************************************/
 
 /* Camera service name. */
-const char QemuClient::mCameraServiceName[]   = "camera";
+const char QemuClient::mCameraServiceName[] = "camera";
 
-QemuClient::QemuClient()
-    : mPipeFD(-1)
-{
-}
+QemuClient::QemuClient() : mPipeFD(-1) {}
 
-QemuClient::~QemuClient()
-{
-    if (mPipeFD >= 0) {
-        close(mPipeFD);
-    }
+QemuClient::~QemuClient() {
+  if (mPipeFD >= 0) {
+    close(mPipeFD);
+  }
 }
 
 /****************************************************************************
  * Qemu client API
  ***************************************************************************/
 
-status_t QemuClient::connectClient(const char* param)
-{
-    ALOGV("%s: '%s'", __FUNCTION__, param ? param : "");
+status_t QemuClient::connectClient(const char* param) {
+  ALOGV("%s: '%s'", __FUNCTION__, param ? param : "");
 
-    /* Make sure that client is not connected already. */
-    if (mPipeFD >= 0) {
-        ALOGE("%s: Qemu client is already connected", __FUNCTION__);
-        return EINVAL;
-    }
+  /* Make sure that client is not connected already. */
+  if (mPipeFD >= 0) {
+    ALOGE("%s: Qemu client is already connected", __FUNCTION__);
+    return EINVAL;
+  }
 
-    /* Select one of the two: 'factory', or 'emulated camera' service */
-    if (param == NULL || *param == '\0') {
-        /* No parameters: connect to the factory service. */
-        char pipe_name[512];
-        snprintf(pipe_name, sizeof(pipe_name), "qemud:%s", mCameraServiceName);
-        mPipeFD = qemu_pipe_open(pipe_name);
-    } else {
-        /* One extra char ':' that separates service name and parameters + six
-         * characters for 'qemud:'. This is required by qemu pipe protocol. */
-        char* connection_str = new char[strlen(mCameraServiceName) +
-                                        strlen(param) + 8];
-        sprintf(connection_str, "qemud:%s:%s", mCameraServiceName, param);
+  /* Select one of the two: 'factory', or 'emulated camera' service */
+  if (param == NULL || *param == '\0') {
+    /* No parameters: connect to the factory service. */
+    char pipe_name[512];
+    snprintf(pipe_name, sizeof(pipe_name), "qemud:%s", mCameraServiceName);
+    mPipeFD = qemu_pipe_open(pipe_name);
+  } else {
+    /* One extra char for the ':' separating service name and parameters,
+     * six chars for 'qemud:', and the terminator, per the pipe protocol. */
+    char* connection_str =
+        new char[strlen(mCameraServiceName) + strlen(param) + 8];
+    sprintf(connection_str, "qemud:%s:%s", mCameraServiceName, param);
 
-        mPipeFD = qemu_pipe_open(connection_str);
-        delete[] connection_str;
-    }
-    if (mPipeFD < 0) {
-        ALOGE("%s: Unable to connect to the camera service '%s': %s",
-             __FUNCTION__, param ? param : "Factory", strerror(errno));
-        return errno ? errno : EINVAL;
-    }
+    mPipeFD = qemu_pipe_open(connection_str);
+    delete[] connection_str;
+  }
+  if (mPipeFD < 0) {
+    ALOGE("%s: Unable to connect to the camera service '%s': %s", __FUNCTION__,
+          param ? param : "Factory", strerror(errno));
+    return errno ? errno : EINVAL;
+  }
 
+  return NO_ERROR;
+}
+
+void QemuClient::disconnectClient() {
+  ALOGV("%s", __FUNCTION__);
+
+  if (mPipeFD >= 0) {
+    close(mPipeFD);
+    mPipeFD = -1;
+  }
+}
+
+status_t QemuClient::sendMessage(const void* data, size_t data_size) {
+  if (mPipeFD < 0) {
+    ALOGE("%s: Qemu client is not connected", __FUNCTION__);
+    return EINVAL;
+  }
+
+  /* Note that we don't use qemud_client_send here, since with qemu pipes we
+   * don't need to provide the payload size before the payload when writing
+   * to the pipe. So, we can use a simple write, and the qemu pipe will call
+   * the receiving end with the number of bytes transferred. */
+  const size_t written = qemud_fd_write(mPipeFD, data, data_size);
+  if (written == data_size) {
     return NO_ERROR;
+  } else {
+    ALOGE("%s: Error sending data via qemu pipe: '%s'", __FUNCTION__,
+          strerror(errno));
+    return errno ? errno : EIO;
+  }
 }
 
-void QemuClient::disconnectClient()
-{
-    ALOGV("%s", __FUNCTION__);
+status_t QemuClient::receiveMessage(void** data, size_t* data_size) {
+  *data = NULL;
+  *data_size = 0;
 
-    if (mPipeFD >= 0) {
-        close(mPipeFD);
-        mPipeFD = -1;
-    }
-}
+  if (mPipeFD < 0) {
+    ALOGE("%s: Qemu client is not connected", __FUNCTION__);
+    return EINVAL;
+  }
 
-status_t QemuClient::sendMessage(const void* data, size_t data_size)
-{
-    if (mPipeFD < 0) {
-        ALOGE("%s: Qemu client is not connected", __FUNCTION__);
-        return EINVAL;
-    }
+  /* When the service replies to a query, it sends the payload size first,
+   * and then the payload itself. Note that the payload size is sent as a
+   * string of 8 characters representing the size as a hexadecimal value.
+   * Note also that the string doesn't contain a zero-terminator. */
+  size_t payload_size;
+  char payload_size_str[9];
+  int rd_res = qemud_fd_read(mPipeFD, payload_size_str, 8);
+  if (rd_res != 8) {
+    ALOGE("%s: Unable to obtain payload size: %s", __FUNCTION__,
+          strerror(errno));
+    return errno ? errno : EIO;
+  }
 
-    /* Note that we don't use here qemud_client_send, since with qemu pipes we
-     * don't need to provide payload size prior to payload when we're writing to
-     * the pipe. So, we can use simple write, and qemu pipe will take care of the
-     * rest, calling the receiving end with the number of bytes transferred. */
-    const size_t written = qemud_fd_write(mPipeFD, data, data_size);
-    if (written == data_size) {
-        return NO_ERROR;
-    } else {
-        ALOGE("%s: Error sending data via qemu pipe: '%s'",
-             __FUNCTION__, strerror(errno));
-        return errno ? errno : EIO;
-    }
-}
+  /* Convert payload size. */
+  errno = 0;
+  payload_size_str[8] = '\0';
+  payload_size = strtol(payload_size_str, NULL, 16);
+  if (errno) {
+    ALOGE("%s: Invalid payload size '%s'", __FUNCTION__, payload_size_str);
+    return EIO;
+  }
 
-status_t QemuClient::receiveMessage(void** data, size_t* data_size)
-{
+  /* Allocate payload data buffer, and read the payload there. */
+  *data = malloc(payload_size);
+  if (*data == NULL) {
+    ALOGE("%s: Unable to allocate %zu bytes payload buffer", __FUNCTION__,
+          payload_size);
+    return ENOMEM;
+  }
+  rd_res = qemud_fd_read(mPipeFD, *data, payload_size);
+  if (static_cast<size_t>(rd_res) == payload_size) {
+    *data_size = payload_size;
+    return NO_ERROR;
+  } else {
+    ALOGE("%s: Read size %d doesnt match expected payload size %zu: %s",
+          __FUNCTION__, rd_res, payload_size, strerror(errno));
+    free(*data);
     *data = NULL;
-    *data_size = 0;
-
-    if (mPipeFD < 0) {
-        ALOGE("%s: Qemu client is not connected", __FUNCTION__);
-        return EINVAL;
-    }
-
-    /* The way the service replies to a query, it sends payload size first, and
-     * then it sends the payload itself. Note that payload size is sent as a
-     * string, containing 8 characters representing a hexadecimal payload size
-     * value. Note also, that the string doesn't contain zero-terminator. */
-    size_t payload_size;
-    char payload_size_str[9];
-    int rd_res = qemud_fd_read(mPipeFD, payload_size_str, 8);
-    if (rd_res != 8) {
-        ALOGE("%s: Unable to obtain payload size: %s",
-             __FUNCTION__, strerror(errno));
-        return errno ? errno : EIO;
-    }
-
-    /* Convert payload size. */
-    errno = 0;
-    payload_size_str[8] = '\0';
-    payload_size = strtol(payload_size_str, NULL, 16);
-    if (errno) {
-        ALOGE("%s: Invalid payload size '%s'", __FUNCTION__, payload_size_str);
-        return EIO;
-    }
-
-    /* Allocate payload data buffer, and read the payload there. */
-    *data = malloc(payload_size);
-    if (*data == NULL) {
-        ALOGE("%s: Unable to allocate %zu bytes payload buffer",
-             __FUNCTION__, payload_size);
-        return ENOMEM;
-    }
-    rd_res = qemud_fd_read(mPipeFD, *data, payload_size);
-    if (static_cast<size_t>(rd_res) == payload_size) {
-        *data_size = payload_size;
-        return NO_ERROR;
-    } else {
-        ALOGE("%s: Read size %d doesnt match expected payload size %zu: %s",
-             __FUNCTION__, rd_res, payload_size, strerror(errno));
-        free(*data);
-        *data = NULL;
-        return errno ? errno : EIO;
-    }
+    return errno ? errno : EIO;
+  }
 }
 
-status_t QemuClient::doQuery(QemuQuery* query)
-{
-    /* Make sure that query has been successfuly constructed. */
-    if (query->mQueryDeliveryStatus != NO_ERROR) {
-        ALOGE("%s: Query is invalid", __FUNCTION__);
-        return query->mQueryDeliveryStatus;
-    }
+status_t QemuClient::doQuery(QemuQuery* query) {
+  /* Make sure that query has been successfully constructed. */
+  if (query->mQueryDeliveryStatus != NO_ERROR) {
+    ALOGE("%s: Query is invalid", __FUNCTION__);
+    return query->mQueryDeliveryStatus;
+  }
 
-    LOGQ("Send query '%s'", query->mQuery);
+  LOGQ("Send query '%s'", query->mQuery);
 
-    /* Send the query. */
-    status_t res = sendMessage(query->mQuery, strlen(query->mQuery) + 1);
+  /* Send the query. */
+  status_t res = sendMessage(query->mQuery, strlen(query->mQuery) + 1);
+  if (res == NO_ERROR) {
+    /* Read the response. */
+    res = receiveMessage(reinterpret_cast<void**>(&query->mReplyBuffer),
+                         &query->mReplySize);
     if (res == NO_ERROR) {
-        /* Read the response. */
-        res = receiveMessage(reinterpret_cast<void**>(&query->mReplyBuffer),
-                      &query->mReplySize);
-        if (res == NO_ERROR) {
-            LOGQ("Response to query '%s': Status = '%.2s', %d bytes in response",
-                 query->mQuery, query->mReplyBuffer, query->mReplySize);
-        } else {
-            ALOGE("%s Response to query '%s' has failed: %s",
-                 __FUNCTION__, query->mQuery, strerror(res));
-        }
+      LOGQ("Response to query '%s': Status = '%.2s', %d bytes in response",
+           query->mQuery, query->mReplyBuffer, query->mReplySize);
     } else {
-        ALOGE("%s: Send query '%s' failed: %s",
-             __FUNCTION__, query->mQuery, strerror(res));
+      ALOGE("%s Response to query '%s' has failed: %s", __FUNCTION__,
+            query->mQuery, strerror(res));
     }
+  } else {
+    ALOGE("%s: Send query '%s' failed: %s", __FUNCTION__, query->mQuery,
+          strerror(res));
+  }
 
-    /* Complete the query, and return its completion handling status. */
-    const status_t res1 = query->completeQuery(res);
-    ALOGE_IF(res1 != NO_ERROR && res1 != res,
-            "%s: Error %d in query '%s' completion",
-            __FUNCTION__, res1, query->mQuery);
-    return res1;
+  /* Complete the query, and return its completion handling status. */
+  const status_t res1 = query->completeQuery(res);
+  ALOGE_IF(res1 != NO_ERROR && res1 != res,
+           "%s: Error %d in query '%s' completion", __FUNCTION__, res1,
+           query->mQuery);
+  return res1;
 }
 
 /****************************************************************************
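
For illustration: receiveMessage() above relies on a simple wire format, an
8-character hexadecimal payload-size prefix followed by the payload itself. A
minimal standalone sketch of decoding that framing, assuming a raw file
descriptor and that read() returns complete chunks (read_reply is a
hypothetical helper, not part of this codebase):

#include <errno.h>
#include <stdlib.h>
#include <unistd.h>

/* Sketch: decode one length-prefixed reply from 'fd'. */
static int read_reply(int fd, void** data, size_t* data_size) {
  char size_str[9]; /* 8 hex characters plus a terminator we add. */
  if (read(fd, size_str, 8) != 8) return errno ? errno : EIO;
  size_str[8] = '\0';
  errno = 0;
  const size_t payload_size = strtoul(size_str, NULL, 16);
  if (errno) return EIO; /* Prefix was not a valid hex number. */
  *data = malloc(payload_size);
  if (*data == NULL) return ENOMEM;
  if (read(fd, *data, payload_size) != (ssize_t)payload_size) {
    free(*data);
    *data = NULL;
    return errno ? errno : EIO;
  }
  *data_size = payload_size;
  return 0;
}
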
@@ -375,43 +357,37 @@
 /* Queries list of cameras connected to the host. */
 const char FactoryQemuClient::mQueryList[] = "list";
 
-FactoryQemuClient::FactoryQemuClient()
-    : QemuClient()
-{
-}
+FactoryQemuClient::FactoryQemuClient() : QemuClient() {}
 
-FactoryQemuClient::~FactoryQemuClient()
-{
-}
+FactoryQemuClient::~FactoryQemuClient() {}
 
-status_t FactoryQemuClient::listCameras(char** list)
-{
-    ALOGV("%s", __FUNCTION__);
+status_t FactoryQemuClient::listCameras(char** list) {
+  ALOGV("%s", __FUNCTION__);
 
-    QemuQuery query(mQueryList);
-    if (doQuery(&query) || !query.isQuerySucceeded()) {
-        ALOGE("%s: List cameras query failed: %s", __FUNCTION__,
-             query.mReplyData ? query.mReplyData : "No error message");
-        return query.getCompletionStatus();
-    }
+  QemuQuery query(mQueryList);
+  if (doQuery(&query) || !query.isQuerySucceeded()) {
+    ALOGE("%s: List cameras query failed: %s", __FUNCTION__,
+          query.mReplyData ? query.mReplyData : "No error message");
+    return query.getCompletionStatus();
+  }
 
-    /* Make sure there is a list returned. */
-    if (query.mReplyDataSize == 0) {
-        ALOGE("%s: No camera list is returned.", __FUNCTION__);
-        return EINVAL;
-    }
+  /* Make sure there is a list returned. */
+  if (query.mReplyDataSize == 0) {
+    ALOGE("%s: No camera list is returned.", __FUNCTION__);
+    return EINVAL;
+  }
 
-    /* Copy the list over. */
-    *list = (char*)malloc(query.mReplyDataSize);
-    if (*list != NULL) {
-        memcpy(*list, query.mReplyData, query.mReplyDataSize);
-        ALOGD("Emulated camera list: %s", *list);
-        return NO_ERROR;
-    } else {
-        ALOGE("%s: Unable to allocate %zu bytes",
-             __FUNCTION__, query.mReplyDataSize);
-        return ENOMEM;
-    }
+  /* Copy the list over. */
+  *list = (char*)malloc(query.mReplyDataSize);
+  if (*list != NULL) {
+    memcpy(*list, query.mReplyData, query.mReplyDataSize);
+    ALOGD("Emulated camera list: %s", *list);
+    return NO_ERROR;
+  } else {
+    ALOGE("%s: Unable to allocate %zu bytes", __FUNCTION__,
+          query.mReplyDataSize);
+    return ENOMEM;
+  }
 }
 
 /****************************************************************************
@@ -423,137 +399,118 @@
  */
 
 /* Connect to the camera device. */
-const char CameraQemuClient::mQueryConnect[]    = "connect";
+const char CameraQemuClient::mQueryConnect[] = "connect";
 /* Disconnect from the camera device. */
 const char CameraQemuClient::mQueryDisconnect[] = "disconnect";
 /* Start capturing video from the camera device. */
-const char CameraQemuClient::mQueryStart[]      = "start";
+const char CameraQemuClient::mQueryStart[] = "start";
 /* Stop capturing video from the camera device. */
-const char CameraQemuClient::mQueryStop[]       = "stop";
+const char CameraQemuClient::mQueryStop[] = "stop";
 /* Get next video frame from the camera device. */
-const char CameraQemuClient::mQueryFrame[]      = "frame";
+const char CameraQemuClient::mQueryFrame[] = "frame";
 
-CameraQemuClient::CameraQemuClient()
-    : QemuClient()
-{
+CameraQemuClient::CameraQemuClient() : QemuClient() {}
+
+CameraQemuClient::~CameraQemuClient() {}
+
+status_t CameraQemuClient::queryConnect() {
+  ALOGV("%s", __FUNCTION__);
+
+  QemuQuery query(mQueryConnect);
+  doQuery(&query);
+  const status_t res = query.getCompletionStatus();
+  ALOGE_IF(res != NO_ERROR, "%s: Query failed: %s", __FUNCTION__,
+           query.mReplyData ? query.mReplyData : "No error message");
+  return res;
 }
 
-CameraQemuClient::~CameraQemuClient()
-{
+status_t CameraQemuClient::queryDisconnect() {
+  ALOGV("%s", __FUNCTION__);
 
+  QemuQuery query(mQueryDisconnect);
+  doQuery(&query);
+  const status_t res = query.getCompletionStatus();
+  ALOGE_IF(res != NO_ERROR, "%s: Query failed: %s", __FUNCTION__,
+           query.mReplyData ? query.mReplyData : "No error message");
+  return res;
 }
 
-status_t CameraQemuClient::queryConnect()
-{
-    ALOGV("%s", __FUNCTION__);
+status_t CameraQemuClient::queryStart(uint32_t pixel_format, int width,
+                                      int height) {
+  ALOGV("%s", __FUNCTION__);
 
-    QemuQuery query(mQueryConnect);
-    doQuery(&query);
-    const status_t res = query.getCompletionStatus();
-    ALOGE_IF(res != NO_ERROR, "%s: Query failed: %s",
-            __FUNCTION__, query.mReplyData ? query.mReplyData :
-                                             "No error message");
+  char query_str[256];
+  snprintf(query_str, sizeof(query_str), "%s dim=%dx%d pix=%d", mQueryStart,
+           width, height, pixel_format);
+  QemuQuery query(query_str);
+  doQuery(&query);
+  const status_t res = query.getCompletionStatus();
+  ALOGE_IF(res != NO_ERROR, "%s: Query failed: %s", __FUNCTION__,
+           query.mReplyData ? query.mReplyData : "No error message");
+  return res;
+}
+
+status_t CameraQemuClient::queryStop() {
+  ALOGV("%s", __FUNCTION__);
+
+  QemuQuery query(mQueryStop);
+  doQuery(&query);
+  const status_t res = query.getCompletionStatus();
+  ALOGE_IF(res != NO_ERROR, "%s: Query failed: %s", __FUNCTION__,
+           query.mReplyData ? query.mReplyData : "No error message");
+  return res;
+}
+
+status_t CameraQemuClient::queryFrame(void* vframe, void* pframe,
+                                      size_t vframe_size, size_t pframe_size,
+                                      float r_scale, float g_scale,
+                                      float b_scale, float exposure_comp) {
+  ALOGV("%s", __FUNCTION__);
+
+  char query_str[256];
+  snprintf(query_str, sizeof(query_str),
+           "%s video=%zu preview=%zu whiteb=%g,%g,%g expcomp=%g", mQueryFrame,
+           (vframe && vframe_size) ? vframe_size : 0,
+           (pframe && pframe_size) ? pframe_size : 0, r_scale, g_scale, b_scale,
+           exposure_comp);
+  QemuQuery query(query_str);
+  doQuery(&query);
+  const status_t res = query.getCompletionStatus();
+  if (res != NO_ERROR) {
+    ALOGE("%s: Query failed: %s", __FUNCTION__,
+          query.mReplyData ? query.mReplyData : "No error message");
     return res;
-}
+  }
 
-status_t CameraQemuClient::queryDisconnect()
-{
-    ALOGV("%s", __FUNCTION__);
-
-    QemuQuery query(mQueryDisconnect);
-    doQuery(&query);
-    const status_t res = query.getCompletionStatus();
-    ALOGE_IF(res != NO_ERROR, "%s: Query failed: %s",
-            __FUNCTION__, query.mReplyData ? query.mReplyData :
-                                             "No error message");
-    return res;
-}
-
-status_t CameraQemuClient::queryStart(uint32_t pixel_format,
-                                      int width,
-                                      int height)
-{
-    ALOGV("%s", __FUNCTION__);
-
-    char query_str[256];
-    snprintf(query_str, sizeof(query_str), "%s dim=%dx%d pix=%d",
-             mQueryStart, width, height, pixel_format);
-    QemuQuery query(query_str);
-    doQuery(&query);
-    const status_t res = query.getCompletionStatus();
-    ALOGE_IF(res != NO_ERROR, "%s: Query failed: %s",
-            __FUNCTION__, query.mReplyData ? query.mReplyData :
-                                             "No error message");
-    return res;
-}
-
-status_t CameraQemuClient::queryStop()
-{
-    ALOGV("%s", __FUNCTION__);
-
-    QemuQuery query(mQueryStop);
-    doQuery(&query);
-    const status_t res = query.getCompletionStatus();
-    ALOGE_IF(res != NO_ERROR, "%s: Query failed: %s",
-            __FUNCTION__, query.mReplyData ? query.mReplyData :
-                                             "No error message");
-    return res;
-}
-
-status_t CameraQemuClient::queryFrame(void* vframe,
-                                      void* pframe,
-                                      size_t vframe_size,
-                                      size_t pframe_size,
-                                      float r_scale,
-                                      float g_scale,
-                                      float b_scale,
-                                      float exposure_comp)
-{
-    ALOGV("%s", __FUNCTION__);
-
-    char query_str[256];
-    snprintf(query_str, sizeof(query_str), "%s video=%zu preview=%zu whiteb=%g,%g,%g expcomp=%g",
-             mQueryFrame, (vframe && vframe_size) ? vframe_size : 0,
-             (pframe && pframe_size) ? pframe_size : 0, r_scale, g_scale, b_scale,
-             exposure_comp);
-    QemuQuery query(query_str);
-    doQuery(&query);
-    const status_t res = query.getCompletionStatus();
-    if( res != NO_ERROR) {
-        ALOGE("%s: Query failed: %s",
-             __FUNCTION__, query.mReplyData ? query.mReplyData :
-                                              "No error message");
-        return res;
+  /* Copy requested frames. */
+  size_t cur_offset = 0;
+  const uint8_t* frame = reinterpret_cast<const uint8_t*>(query.mReplyData);
+  /* Video frame is always first. */
+  if (vframe != NULL && vframe_size != 0) {
+    /* Make sure that video frame is in. */
+    if ((query.mReplyDataSize - cur_offset) >= vframe_size) {
+      memcpy(vframe, frame, vframe_size);
+      cur_offset += vframe_size;
+    } else {
+      ALOGE("%s: Reply %zu bytes is to small to contain %zu bytes video frame",
+            __FUNCTION__, query.mReplyDataSize - cur_offset, vframe_size);
+      return EINVAL;
     }
-
-    /* Copy requested frames. */
-    size_t cur_offset = 0;
-    const uint8_t* frame = reinterpret_cast<const uint8_t*>(query.mReplyData);
-    /* Video frame is always first. */
-    if (vframe != NULL && vframe_size != 0) {
-        /* Make sure that video frame is in. */
-        if ((query.mReplyDataSize - cur_offset) >= vframe_size) {
-            memcpy(vframe, frame, vframe_size);
-            cur_offset += vframe_size;
-        } else {
-            ALOGE("%s: Reply %zu bytes is to small to contain %zu bytes video frame",
-                 __FUNCTION__, query.mReplyDataSize - cur_offset, vframe_size);
-            return EINVAL;
-        }
+  }
+  if (pframe != NULL && pframe_size != 0) {
+    /* Make sure that preview frame is in. */
+    if ((query.mReplyDataSize - cur_offset) >= pframe_size) {
+      memcpy(pframe, frame + cur_offset, pframe_size);
+      cur_offset += pframe_size;
+    } else {
+      ALOGE(
+          "%s: Reply %zu bytes is to small to contain %zu bytes preview frame",
+          __FUNCTION__, query.mReplyDataSize - cur_offset, pframe_size);
+      return EINVAL;
     }
-    if (pframe != NULL && pframe_size != 0) {
-        /* Make sure that preview frame is in. */
-        if ((query.mReplyDataSize - cur_offset) >= pframe_size) {
-            memcpy(pframe, frame + cur_offset, pframe_size);
-            cur_offset += pframe_size;
-        } else {
-            ALOGE("%s: Reply %zu bytes is to small to contain %zu bytes preview frame",
-                 __FUNCTION__, query.mReplyDataSize - cur_offset, pframe_size);
-            return EINVAL;
-        }
-    }
+  }
 
-    return NO_ERROR;
+  return NO_ERROR;
 }
 
 }; /* namespace android */
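
For illustration: every query method in this file follows the same pattern,
construct a QemuQuery, run it through doQuery(), then read the completion
status. A sketch of that pattern from the caller's side, assuming an
already-connected client (sendSimpleQuery is a hypothetical helper):

/* Sketch: the construct / doQuery / getCompletionStatus pattern. */
static status_t sendSimpleQuery(QemuClient* client, const char* name) {
  QemuQuery query(name);   /* A query with no parameters. */
  client->doQuery(&query); /* Send, receive, and complete the query. */
  const status_t res = query.getCompletionStatus();
  if (res != NO_ERROR) {
    ALOGE("Query '%s' failed: %s", name,
          query.mReplyData ? query.mReplyData : "No error message");
  }
  return res;
}
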
diff --git a/guest/hals/camera/QemuClient.h b/guest/hals/camera/QemuClient.h
index 1644321..290ab41 100644
--- a/guest/hals/camera/QemuClient.h
+++ b/guest/hals/camera/QemuClient.h
@@ -18,8 +18,8 @@
 #define HW_EMULATOR_CAMERA_QEMU_CLIENT_H
 
 /*
- * Contains declaration of classes that encapsulate connection to camera services
- * in the emulator via qemu pipe.
+ * Contains declaration of classes that encapsulate connection to camera
+ * services in the emulator via qemu pipe.
  */
 
 #include <hardware/qemud.h>
@@ -67,125 +67,125 @@
  *  - 'ok' Encoding the success
  *  - 'ko' Encoding a failure.
  * After that payload may have optional data. If payload has more data following
- * the query result, there is a ':' character separating them. If payload carries
- * only the result, it always ends with a zero-terminator. So, payload 'ok'/'ko'
- * prefix is always 3 bytes long: it either includes a zero-terminator, if there
- * is no data, or a ':' separator.
+ * the query result, there is a ':' character separating them. If payload
+ * carries only the result, it always ends with a zero-terminator. So, payload
+ * 'ok'/'ko' prefix is always 3 bytes long: it either includes a
+ * zero-terminator, if there is no data, or a ':' separator.
  */
 class QemuQuery {
-public:
-    /* Constructs an uninitialized QemuQuery instance. */
-    QemuQuery();
+ public:
+  /* Constructs an uninitialized QemuQuery instance. */
+  QemuQuery();
 
-    /* Constructs and initializes QemuQuery instance for a query.
-     * Param:
-     *  query_string - Query string. This constructor can also be used to
-     *      construct a query that doesn't have parameters. In this case query
-     *      name can be passed as a parameter here.
-     */
-    explicit QemuQuery(const char* query_string);
+  /* Constructs and initializes QemuQuery instance for a query.
+   * Param:
+   *  query_string - Query string. This constructor can also be used to
+   *      construct a query that doesn't have parameters. In this case query
+   *      name can be passed as a parameter here.
+   */
+  explicit QemuQuery(const char* query_string);
 
-    /* Constructs and initializes QemuQuery instance for a query with parameters.
-     * Param:
-     *  query_name - Query name.
-     *  query_param - Query parameters. Can be NULL.
-     */
-    QemuQuery(const char* query_name, const char* query_param);
+  /* Constructs and initializes QemuQuery instance for a query with parameters.
+   * Param:
+   *  query_name - Query name.
+   *  query_param - Query parameters. Can be NULL.
+   */
+  QemuQuery(const char* query_name, const char* query_param);
 
-    /* Destructs QemuQuery instance. */
-    ~QemuQuery();
+  /* Destructs QemuQuery instance. */
+  ~QemuQuery();
 
-    /****************************************************************************
-     * Public API
-     ***************************************************************************/
+  /****************************************************************************
+   * Public API
+   ***************************************************************************/
 
-    /* Creates new query.
-     * Note: this method will reset this instance prior to creating a new query
-     * in order to discard possible "leftovers" from the previous query.
-     * Param:
-     *  query_name - Query name.
-     *  query_param - Query parameters. Can be NULL.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status.
-     */
-    status_t createQuery(const char* name, const char* param);
+  /* Creates new query.
+   * Note: this method will reset this instance prior to creating a new query
+   * in order to discard possible "leftovers" from the previous query.
+   * Param:
+   *  query_name - Query name.
+   *  query_param - Query parameters. Can be NULL.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status.
+   */
+  status_t createQuery(const char* name, const char* param);
 
-    /* Completes the query after a reply from the emulator.
-     * This method will parse the reply buffer, and calculate the final query
-     * status, which depends not only on the transport success / failure, but
-     * also on 'ok' / 'ko' in the reply buffer.
-     * Param:
-     *  status - Query delivery status. This status doesn't necessarily reflects
-     *      the final query status (which is defined by 'ok'/'ko' prefix in the
-     *      reply buffer). This status simply states whether or not the query has
-     *      been sent, and a reply has been received successfuly. However, if
-     *      this status indicates a failure, it means that the entire query has
-     *      failed.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status on failure. Note that
-     *  status returned here just signals whether or not the method has succeeded.
-     *  Use isQuerySucceeded() / getCompletionStatus() methods of this class to
-     *  check the final query status.
-     */
-    status_t completeQuery(status_t status);
+  /* Completes the query after a reply from the emulator.
+   * This method will parse the reply buffer, and calculate the final query
+   * status, which depends not only on the transport success / failure, but
+   * also on 'ok' / 'ko' in the reply buffer.
+   * Param:
+   *  status - Query delivery status. This status doesn't necessarily reflect
+   *      the final query status (which is defined by the 'ok'/'ko' prefix in
+   *      the reply buffer). This status simply states whether or not the query
+   *      has been sent and a reply has been received successfully. However, if
+   *      this status indicates a failure, it means that the entire query has
+   *      failed.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status on failure. Note that
+   *  status returned here just signals whether or not the method has succeeded.
+   *  Use isQuerySucceeded() / getCompletionStatus() methods of this class to
+   *  check the final query status.
+   */
+  status_t completeQuery(status_t status);
 
-    /* Resets the query from a previous use. */
-    void resetQuery();
+  /* Resets the query from a previous use. */
+  void resetQuery();
 
-    /* Checks if query has succeeded.
-     * Note that this method must be called after completeQuery() method of this
-     * class has been executed.
-     */
-    inline bool isQuerySucceeded() const {
-        return mQueryDeliveryStatus == NO_ERROR && mReplyStatus != 0;
+  /* Checks if query has succeeded.
+   * Note that this method must be called after completeQuery() method of this
+   * class has been executed.
+   */
+  inline bool isQuerySucceeded() const {
+    return mQueryDeliveryStatus == NO_ERROR && mReplyStatus != 0;
+  }
+
+  /* Gets final completion status of the query.
+   * Note that this method must be called after completeQuery() method of this
+   * class has been executed.
+   * Return:
+   *  NO_ERROR if query has succeeded, or an appropriate error status on query
+   *  failure.
+   */
+  inline status_t getCompletionStatus() const {
+    if (mQueryDeliveryStatus == NO_ERROR) {
+      if (mReplyStatus) {
+        return NO_ERROR;
+      } else {
+        return EINVAL;
+      }
+    } else {
+      return mQueryDeliveryStatus;
     }
+  }
 
-    /* Gets final completion status of the query.
-     * Note that this method must be called after completeQuery() method of this
-     * class has been executed.
-     * Return:
-     *  NO_ERROR if query has succeeded, or an appropriate error status on query
-     *  failure.
-     */
-    inline status_t getCompletionStatus() const {
-        if (mQueryDeliveryStatus == NO_ERROR) {
-            if (mReplyStatus) {
-                return NO_ERROR;
-            } else {
-                return EINVAL;
-            }
-        } else {
-            return mQueryDeliveryStatus;
-        }
-    }
+  /****************************************************************************
+   * Public data members
+   ***************************************************************************/
 
-    /****************************************************************************
-     * Public data memebers
-     ***************************************************************************/
+ public:
+  /* Query string. */
+  char* mQuery;
+  /* Query delivery status. */
+  status_t mQueryDeliveryStatus;
+  /* Reply buffer */
+  char* mReplyBuffer;
+  /* Reply data (past 'ok'/'ko'). If NULL, there were no data in reply. */
+  char* mReplyData;
+  /* Reply buffer size. */
+  size_t mReplySize;
+  /* Reply data size. */
+  size_t mReplyDataSize;
+  /* Reply status: 1 - ok, 0 - ko. */
+  int mReplyStatus;
 
-public:
-    /* Query string. */
-    char*       mQuery;
-    /* Query delivery status. */
-    status_t    mQueryDeliveryStatus;
-    /* Reply buffer */
-    char*       mReplyBuffer;
-    /* Reply data (past 'ok'/'ko'). If NULL, there were no data in reply. */
-    char*       mReplyData;
-    /* Reply buffer size. */
-    size_t      mReplySize;
-    /* Reply data size. */
-    size_t      mReplyDataSize;
-    /* Reply status: 1 - ok, 0 - ko. */
-    int         mReplyStatus;
+  /****************************************************************************
+   * Private data members
+   ***************************************************************************/
 
-    /****************************************************************************
-     * Private data memebers
-     ***************************************************************************/
-
-protected:
-    /* Preallocated buffer for small queries. */
-    char    mQueryPrealloc[256];
+ protected:
+  /* Preallocated buffer for small queries. */
+  char mQueryPrealloc[256];
 };
 
 /****************************************************************************
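
For illustration: the 'ok'/'ko' layout documented above makes the reply prefix
exactly 3 bytes. A sketch of splitting a reply into status and optional data
under that layout (completeQuery() is the real implementation; parse_reply is
hypothetical):

/* Sketch: split "ok", "ko", "ok:<data>" or "ko:<data>". 'reply' is
 * NUL-terminated when it carries no data, per the comment above. */
static bool parse_reply(const char* reply, const char** data_out) {
  const bool ok = (reply[0] == 'o' && reply[1] == 'k');
  /* Byte 3 is either ':' (data follows) or the zero-terminator. */
  *data_out = (reply[2] == ':') ? reply + 3 : NULL;
  return ok;
}
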
@@ -196,100 +196,100 @@
  * pipe.
  */
 class QemuClient {
-public:
-    /* Constructs QemuClient instance. */
-    QemuClient();
+ public:
+  /* Constructs QemuClient instance. */
+  QemuClient();
 
-    /* Destructs QemuClient instance. */
-    virtual ~QemuClient();
+  /* Destructs QemuClient instance. */
+  virtual ~QemuClient();
 
-    /****************************************************************************
-     * Qemu client API
-     ***************************************************************************/
+  /****************************************************************************
+   * Qemu client API
+   ***************************************************************************/
 
-public:
-    /* Connects to the 'camera' service in the emulator via qemu pipe.
-     * Param:
-     *  param - Parameters to pass to the camera service. There are two types of
-     *      camera services implemented by the emulator. The first one is a
-     *      'camera factory' type of service that provides list of cameras
-     *      connected to the host. Another one is an 'emulated camera' type of
-     *      service that provides interface to a camera connected to the host. At
-     *      the connection time emulator makes distinction between the two by
-     *      looking at connection parameters: no parameters means connection to
-     *      the 'factory' service, while connection with parameters means
-     *      connection to an 'emulated camera' service, where camera is identified
-     *      by one of the connection parameters. So, passing NULL, or an empty
-     *      string to this method will establish a connection with the 'factory'
-     *      service, while not empty string passed here will establish connection
-     *      with an 'emulated camera' service. Parameters defining the emulated
-     *      camera must be formatted as such:
-     *
-     *          "name=<device name> [inp_channel=<input channel #>]",
-     *
-     *      where 'device name' is a required parameter defining name of the
-     *      camera device, and 'input channel' is an optional parameter (positive
-     *      integer), defining the input channel to use on the camera device.
-     *      Note that device name passed here must have been previously obtained
-     *      from the factory service using 'list' query.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status.
-     */
-    virtual status_t connectClient(const char* param);
+ public:
+  /* Connects to the 'camera' service in the emulator via qemu pipe.
+   * Param:
+   *  param - Parameters to pass to the camera service. There are two types of
+   *      camera services implemented by the emulator. The first one is a
+   *      'camera factory' type of service that provides a list of cameras
+   *      connected to the host. The other is an 'emulated camera' type of
+   *      service that provides an interface to a camera connected to the
+   *      host. At connection time the emulator distinguishes between the two
+   *      by looking at the connection parameters: no parameters means a
+   *      connection to the 'factory' service, while parameters mean a
+   *      connection to an 'emulated camera' service, where the camera is
+   *      identified by one of the connection parameters. So, passing NULL or
+   *      an empty string to this method establishes a connection with the
+   *      'factory' service, while a non-empty string establishes a connection
+   *      with an 'emulated camera' service. Parameters defining the emulated
+   *      camera must be formatted as such:
+   *
+   *          "name=<device name> [inp_channel=<input channel #>]",
+   *
+   *      where 'device name' is a required parameter defining name of the
+   *      camera device, and 'input channel' is an optional parameter (positive
+   *      integer), defining the input channel to use on the camera device.
+   *      Note that device name passed here must have been previously obtained
+   *      from the factory service using 'list' query.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status.
+   */
+  virtual status_t connectClient(const char* param);
 
-    /* Disconnects from the service. */
-    virtual void disconnectClient();
+  /* Disconnects from the service. */
+  virtual void disconnectClient();
 
-    /* Sends data to the service.
-     * Param:
-     *  data, data_size - Data to send.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status on failure.
-     */
-    virtual status_t sendMessage(const void* data, size_t data_size);
+  /* Sends data to the service.
+   * Param:
+   *  data, data_size - Data to send.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status on failure.
+   */
+  virtual status_t sendMessage(const void* data, size_t data_size);
 
-    /* Receives data from the service.
-     * This method assumes that data to receive will come in two chunks: 8
-     * characters encoding the payload size in hexadecimal string, followed by
-     * the paylod (if any).
-     * This method will allocate data buffer where to receive the response.
-     * Param:
-     *  data - Upon success contains address of the allocated data buffer with
-     *      the data received from the service. The caller is responsible for
-     *      freeing allocated data buffer.
-     *  data_size - Upon success contains size of the data received from the
-     *      service.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status on failure.
-     */
-    virtual status_t receiveMessage(void** data, size_t* data_size);
+  /* Receives data from the service.
+   * This method assumes that data to receive will come in two chunks: 8
+   * characters encoding the payload size in hexadecimal string, followed by
+   * the payload (if any).
+   * This method allocates the data buffer in which to receive the response.
+   * Param:
+   *  data - Upon success contains address of the allocated data buffer with
+   *      the data received from the service. The caller is responsible for
+   *      freeing allocated data buffer.
+   *  data_size - Upon success contains size of the data received from the
+   *      service.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status on failure.
+   */
+  virtual status_t receiveMessage(void** data, size_t* data_size);
 
-    /* Sends a query, and receives a response from the service.
-     * Param:
-     *  query - Query to send to the service. When this method returns, the query
-     *  is completed, and all its relevant data members are properly initialized.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status on failure. Note that
-     *  status returned here is not the final query status. Use isQuerySucceeded(),
-     *  or getCompletionStatus() method on the query object to see if it has
-     *  succeeded. However, if this method returns a failure, it means that the
-     *  query has failed, and there is no guarantee that its data members are
-     *  properly initialized (except for the 'mQueryDeliveryStatus', which is
-     *  always in the proper state).
-     */
-    virtual status_t doQuery(QemuQuery* query);
+  /* Sends a query, and receives a response from the service.
+   * Param:
+   *  query - Query to send to the service. When this method returns, the query
+   *  is completed, and all its relevant data members are properly initialized.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status on failure. Note that
+   *  status returned here is not the final query status. Use
+   * isQuerySucceeded(), or getCompletionStatus() method on the query object to
+   * see if it has succeeded. However, if this method returns a failure, it
+   * means that the query has failed, and there is no guarantee that its data
+   * members are properly initialized (except for the 'mQueryDeliveryStatus',
+   * which is always in the proper state).
+   */
+  virtual status_t doQuery(QemuQuery* query);
 
-    /****************************************************************************
-     * Data members
-     ***************************************************************************/
+  /****************************************************************************
+   * Data members
+   ***************************************************************************/
 
-protected:
-    /* Qemu pipe handle. */
-    int     mPipeFD;
+ protected:
+  /* Qemu pipe handle. */
+  int mPipeFD;
 
-private:
-    /* Camera service name. */
-    static const char mCameraServiceName[];
+ private:
+  /* Camera service name. */
+  static const char mCameraServiceName[];
 };
 
 /****************************************************************************
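
For illustration: per the connectClient() comment above, an 'emulated camera'
connection string has the form "name=<device name> [inp_channel=<input
channel #>]". A sketch of assembling one, using a made-up device name (real
names come from the factory 'list' query):

#include <stdio.h>

/* Sketch: build connection parameters for an emulated camera service. */
static void make_param(char* param, size_t size) {
  snprintf(param, size, "name=%s inp_channel=%d",
           "camera0" /* hypothetical device name */, 0);
}
/* Pass the result to connectClient(); pass NULL instead to reach the
 * 'factory' service. */
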
@@ -298,52 +298,53 @@
 
 /* Encapsulates QemuClient for the 'factory' service. */
 class FactoryQemuClient : public QemuClient {
-public:
-    /* Constructs FactoryQemuClient instance. */
-    FactoryQemuClient();
+ public:
+  /* Constructs FactoryQemuClient instance. */
+  FactoryQemuClient();
 
-    /* Destructs FactoryQemuClient instance. */
-    ~FactoryQemuClient();
+  /* Destructs FactoryQemuClient instance. */
+  ~FactoryQemuClient();
 
-    /****************************************************************************
-     * Public API
-     ***************************************************************************/
+  /****************************************************************************
+   * Public API
+   ***************************************************************************/
 
-public:
-    /* Lists camera devices connected to the host.
-     * Param:
-     *  list - Upon success contains a list of cameras connected to the host. The
-     *      list returned here is represented as a string, containing multiple
-     *      lines separated with '\n', where each line represents a camera. Each
-     *      camera line is formatted as such:
-     *
-     *          "name=<device name> channel=<num> pix=<num> framedims=<dimensions>\n"
-     *
-     *      Where:
-     *      - 'name' is the name of the camera device attached to the host. This
-     *        name must be used for subsequent connection to the 'emulated camera'
-     *        service for that camera.
-     *      - 'channel' - input channel number (positive int) to use to communicate
-     *        with the camera.
-     *      - 'pix' - pixel format (a "fourcc" uint), chosen for the video frames
-     *        by the camera service.
-     *      - 'framedims' contains a list of frame dimensions supported by the
-     *        camera for the chosen pixel format. Each etry in the list is in form
-     *        '<width>x<height>', where 'width' and 'height' are numeric values
-     *        for width and height of a supported frame dimension. Entries in
-     *        this list are separated with ',' with no spaces between the entries.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status on failure.
-     */
-    status_t listCameras(char** list);
+ public:
+  /* Lists camera devices connected to the host.
+   * Param:
+   *  list - Upon success contains a list of cameras connected to the host. The
+   *      list returned here is represented as a string, containing multiple
+   *      lines separated with '\n', where each line represents a camera. Each
+   *      camera line is formatted as such:
+   *
+   *          "name=<device name> channel=<num> pix=<num>
+   * framedims=<dimensions>\n"
+   *
+   *      Where:
+   *      - 'name' is the name of the camera device attached to the host. This
+   *        name must be used for subsequent connection to the 'emulated camera'
+   *        service for that camera.
+   *      - 'channel' - input channel number (positive int) to use to
+   * communicate with the camera.
+   *      - 'pix' - pixel format (a "fourcc" uint), chosen for the video frames
+   *        by the camera service.
+   *      - 'framedims' contains a list of frame dimensions supported by the
+   *        camera for the chosen pixel format. Each entry in the list has
+   *        the form '<width>x<height>', where 'width' and 'height' are the
+   *        numeric width and height of a supported frame dimension. Entries in
+   *        this list are separated with ',' with no spaces between the entries.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status on failure.
+   */
+  status_t listCameras(char** list);
 
-    /****************************************************************************
-     * Names of the queries available for the emulated camera factory.
-     ***************************************************************************/
+  /****************************************************************************
+   * Names of the queries available for the emulated camera factory.
+   ***************************************************************************/
 
-private:
-    /* List cameras connected to the host. */
-    static const char mQueryList[];
+ private:
+  /* List cameras connected to the host. */
+  static const char mQueryList[];
 };
 
 /****************************************************************************
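
For illustration: a line of the listCameras() reply described above could be
pulled apart as follows (a sketch using sscanf and an invented sample line; it
assumes the device name contains no spaces):

#include <stdio.h>

/* Sketch: parse one line of the factory 'list' reply. */
static void parse_list_line(const char* line) {
  char name[64], dims[256];
  int channel = 0;
  unsigned pix = 0;
  if (sscanf(line, "name=%63s channel=%d pix=%u framedims=%255s",
             name, &channel, &pix, dims) == 4) {
    /* 'dims' now holds e.g. "640x480,320x240"; split on ',' for each
     * '<width>x<height>' entry. */
  }
}
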
@@ -353,85 +354,80 @@
 /* Encapsulates QemuClient for an 'emulated camera' service.
  */
 class CameraQemuClient : public QemuClient {
-public:
-    /* Constructs CameraQemuClient instance. */
-    CameraQemuClient();
+ public:
+  /* Constructs CameraQemuClient instance. */
+  CameraQemuClient();
 
-    /* Destructs CameraQemuClient instance. */
-    ~CameraQemuClient();
+  /* Destructs CameraQemuClient instance. */
+  ~CameraQemuClient();
 
-    /****************************************************************************
-     * Public API
-     ***************************************************************************/
+  /****************************************************************************
+   * Public API
+   ***************************************************************************/
 
-public:
-    /* Queries camera connection.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status on failure.
-     */
-    status_t queryConnect();
+ public:
+  /* Queries camera connection.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status on failure.
+   */
+  status_t queryConnect();
 
-    /* Queries camera disconnection.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status on failure.
-     */
-    status_t queryDisconnect();
+  /* Queries camera disconnection.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status on failure.
+   */
+  status_t queryDisconnect();
 
-    /* Queries camera to start capturing video.
-     * Param:
-     *  pixel_format - Pixel format that is used by the client to push video
-     *      frames to the camera framework.
-     *  width, height - Frame dimensions, requested by the framework.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status on failure.
-     */
-    status_t queryStart(uint32_t pixel_format, int width, int height);
+  /* Queries camera to start capturing video.
+   * Param:
+   *  pixel_format - Pixel format that is used by the client to push video
+   *      frames to the camera framework.
+   *  width, height - Frame dimensions, requested by the framework.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status on failure.
+   */
+  status_t queryStart(uint32_t pixel_format, int width, int height);
 
-    /* Queries camera to stop capturing video.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status on failure.
-     */
-    status_t queryStop();
+  /* Queries camera to stop capturing video.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status on failure.
+   */
+  status_t queryStop();
 
-    /* Queries camera for the next video frame.
-     * Param:
-     *  vframe, vframe_size - Define buffer, allocated to receive a video frame.
-     *      Any of these parameters can be 0, indicating that the caller is
-     *      interested only in preview frame.
-     *  pframe, pframe_size - Define buffer, allocated to receive a preview frame.
-     *      Any of these parameters can be 0, indicating that the caller is
-     *      interested only in video frame.
-     *  r_scale, g_scale, b_scale - White balance scale.
-     *  exposure_comp - Expsoure compensation.
-     * Return:
-     *  NO_ERROR on success, or an appropriate error status on failure.
-     */
-    status_t queryFrame(void* vframe,
-                        void* pframe,
-                        size_t vframe_size,
-                        size_t pframe_size,
-                        float r_scale,
-                        float g_scale,
-                        float b_scale,
-                        float exposure_comp);
+  /* Queries camera for the next video frame.
+   * Param:
+   *  vframe, vframe_size - Define buffer, allocated to receive a video frame.
+   *      Any of these parameters can be 0, indicating that the caller is
+   *      interested only in preview frame.
+   *  pframe, pframe_size - Define buffer, allocated to receive a preview frame.
+   *      Any of these parameters can be 0, indicating that the caller is
+   *      interested only in video frame.
+   *  r_scale, g_scale, b_scale - White balance scale.
+   *  exposure_comp - Exposure compensation.
+   * Return:
+   *  NO_ERROR on success, or an appropriate error status on failure.
+   */
+  status_t queryFrame(void* vframe, void* pframe, size_t vframe_size,
+                      size_t pframe_size, float r_scale, float g_scale,
+                      float b_scale, float exposure_comp);
 
-    /****************************************************************************
-     * Names of the queries available for the emulated camera.
-     ***************************************************************************/
+  /****************************************************************************
+   * Names of the queries available for the emulated camera.
+   ***************************************************************************/
 
-private:
-    /* Connect to the camera. */
-    static const char mQueryConnect[];
-    /* Disconnect from the camera. */
-    static const char mQueryDisconnect[];
-    /* Start video capturing. */
-    static const char mQueryStart[];
-    /* Stop video capturing. */
-    static const char mQueryStop[];
-    /* Query frame(s). */
-    static const char mQueryFrame[];
+ private:
+  /* Connect to the camera. */
+  static const char mQueryConnect[];
+  /* Disconnect from the camera. */
+  static const char mQueryDisconnect[];
+  /* Start video capturing. */
+  static const char mQueryStart[];
+  /* Stop video capturing. */
+  static const char mQueryStop[];
+  /* Query frame(s). */
+  static const char mQueryFrame[];
 };
 
 }; /* namespace android */
 
-#endif  /* HW_EMULATOR_CAMERA_QEMU_CLIENT_H */
+#endif /* HW_EMULATOR_CAMERA_QEMU_CLIENT_H */
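
For illustration: the intended call sequence for CameraQemuClient, stitched
together from the API above (a sketch: the device name, the pixel format
constant from <linux/videodev2.h>, and the buffer sizing are all assumptions):

#include <vector>

/* Sketch: one capture cycle against an emulated camera service. */
void captureOnce(CameraQemuClient& client) {
  if (client.connectClient("name=camera0") != NO_ERROR) return;
  if (client.queryConnect() != NO_ERROR) return;
  client.queryStart(V4L2_PIX_FMT_NV21 /* assumed fourcc */, 640, 480);
  std::vector<uint8_t> preview(640 * 480 * 4); /* assumed RGBA32 preview */
  client.queryFrame(NULL, preview.data(), 0, preview.size(),
                    1.0f, 1.0f, 1.0f, /* neutral white balance */
                    0.0f /* no exposure compensation */);
  client.queryStop();
  client.queryDisconnect();
  client.disconnectClient();
}
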
diff --git a/guest/hals/camera/VSoCEmulatedCameraHotplugThread.cpp b/guest/hals/camera/VSoCEmulatedCameraHotplugThread.cpp
index 1f70fbf..8cd6d55 100644
--- a/guest/hals/camera/VSoCEmulatedCameraHotplugThread.cpp
+++ b/guest/hals/camera/VSoCEmulatedCameraHotplugThread.cpp
@@ -17,78 +17,67 @@
 #define LOG_TAG "EmulatedCamera_HotplugThread"
 #include <cutils/log.h>
 
-#include <sys/types.h>
-#include <sys/stat.h>
 #include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
 
-#include "EmulatedCameraHotplugThread.h"
 #include "EmulatedCameraFactory.h"
+#include "EmulatedCameraHotplugThread.h"
 
 #define SubscriberInfo EmulatedCameraHotplugThread::SubscriberInfo
 
 namespace android {
 
 EmulatedCameraHotplugThread::EmulatedCameraHotplugThread(
-    size_t totalCameraCount) :
-        Thread(/*canCallJava*/false) {}
+    size_t totalCameraCount)
+    : Thread(/*canCallJava*/ false) {}
 
 EmulatedCameraHotplugThread::~EmulatedCameraHotplugThread() {}
 
 status_t EmulatedCameraHotplugThread::requestExitAndWait() {
-    ALOGE("%s: Not implemented. Use requestExit + join instead",
-          __FUNCTION__);
-    return INVALID_OPERATION;
+  ALOGE("%s: Not implemented. Use requestExit + join instead", __FUNCTION__);
+  return INVALID_OPERATION;
 }
 
 void EmulatedCameraHotplugThread::requestExit() {
-    ALOGV("%s: Requesting thread exit", __FUNCTION__);
-    mRunning = false;
+  ALOGV("%s: Requesting thread exit", __FUNCTION__);
+  mRunning = false;
 }
 
-status_t EmulatedCameraHotplugThread::readyToRun() {
-    return OK;
-}
+status_t EmulatedCameraHotplugThread::readyToRun() { return OK; }
 
 bool EmulatedCameraHotplugThread::threadLoop() {
-    // Thread is irrelevant right now; hoplug is not supported.
-    return false;
+  // Thread is irrelevant right now; hotplug is not supported.
+  return false;
 }
 
 String8 EmulatedCameraHotplugThread::getFilePath(int cameraId) const {
-    return String8();
+  return String8();
 }
 
-bool EmulatedCameraHotplugThread::createFileIfNotExists(int cameraId) const
-{
-    return true;
+bool EmulatedCameraHotplugThread::createFileIfNotExists(int cameraId) const {
+  return true;
 }
 
 int EmulatedCameraHotplugThread::getCameraId(String8 filePath) const {
-    // Not used anywhere.
-    return NAME_NOT_FOUND;
+  // Not used anywhere.
+  return NAME_NOT_FOUND;
 }
 
 int EmulatedCameraHotplugThread::getCameraId(int wd) const {
-    // Not used anywhere.
-    return NAME_NOT_FOUND;
+  // Not used anywhere.
+  return NAME_NOT_FOUND;
 }
 
-SubscriberInfo* EmulatedCameraHotplugThread::getSubscriberInfo(int cameraId)
-{
-    // Not used anywhere.
-    return NULL;
+SubscriberInfo* EmulatedCameraHotplugThread::getSubscriberInfo(int cameraId) {
+  // Not used anywhere.
+  return NULL;
 }
 
-bool EmulatedCameraHotplugThread::addWatch(int cameraId) {
-    return true;
-}
+bool EmulatedCameraHotplugThread::addWatch(int cameraId) { return true; }
 
-bool EmulatedCameraHotplugThread::removeWatch(int cameraId) {
-    return true;
-}
+bool EmulatedCameraHotplugThread::removeWatch(int cameraId) { return true; }
 
-int EmulatedCameraHotplugThread::readFile(String8 filePath) const {
-    return 1;
-}
+int EmulatedCameraHotplugThread::readFile(String8 filePath) const { return 1; }
 
-} //namespace android
+}  // namespace android
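
For illustration: requestExitAndWait() above deliberately fails, so shutdown
follows the pattern its error message names (a sketch using the
android::Thread API; the thread name string is an arbitrary choice):

/* Sketch: stop the hotplug thread via requestExit() + join(). */
sp<EmulatedCameraHotplugThread> thread(new EmulatedCameraHotplugThread(1));
thread->run("camera.hotplug");
/* ... later, during teardown ... */
thread->requestExit(); /* clears mRunning */
thread->join();        /* wait for threadLoop() to return false */
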
diff --git a/guest/hals/camera/fake-pipeline2/Base.h b/guest/hals/camera/fake-pipeline2/Base.h
index dfeeeca..638a719 100644
--- a/guest/hals/camera/fake-pipeline2/Base.h
+++ b/guest/hals/camera/fake-pipeline2/Base.h
@@ -32,38 +32,37 @@
 
 namespace android {
 
-
 /* Internal structure for passing buffers across threads */
 struct StreamBuffer {
-    // Positive numbers are output streams
-    // Negative numbers are input reprocess streams
-    // Zero is an auxillary buffer
-    int streamId;
-    uint32_t width, height;
-    uint32_t format;
-    uint32_t dataSpace;
-    uint32_t stride;
-    buffer_handle_t *buffer;
-    uint8_t *img;
+  // Positive numbers are output streams
+  // Negative numbers are input reprocess streams
+  // Zero is an auxiliary buffer
+  int streamId;
+  uint32_t width, height;
+  uint32_t format;
+  uint32_t dataSpace;
+  uint32_t stride;
+  buffer_handle_t *buffer;
+  uint8_t *img;
 };
 typedef Vector<StreamBuffer> Buffers;
 
 struct Stream {
-    const camera2_stream_ops_t *ops;
-    uint32_t width, height;
-    int32_t format;
-    uint32_t stride;
+  const camera2_stream_ops_t *ops;
+  uint32_t width, height;
+  int32_t format;
+  uint32_t stride;
 };
 
 struct ReprocessStream {
-    const camera2_stream_in_ops_t *ops;
-    uint32_t width, height;
-    int32_t format;
-    uint32_t stride;
-    // -1 if the reprocessing stream is independent
-    int32_t sourceStreamId;
+  const camera2_stream_in_ops_t *ops;
+  uint32_t width, height;
+  int32_t format;
+  uint32_t stride;
+  // -1 if the reprocessing stream is independent
+  int32_t sourceStreamId;
 };
 
-} // namespace android;
+}  // namespace android
 
 #endif
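
For illustration: the streamId sign convention documented in StreamBuffer
above is consumed like this (a sketch; routeBuffer is hypothetical):

/* Sketch: dispatch on the StreamBuffer streamId convention. */
void routeBuffer(const android::StreamBuffer& b) {
  if (b.streamId > 0) {
    /* Output stream: hand b.img (row stride b.stride) to the consumer. */
  } else if (b.streamId < 0) {
    /* Input reprocess stream: feed the buffer back into the pipeline. */
  } else {
    /* streamId == 0: auxiliary scratch buffer owned by the pipeline. */
  }
}
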
diff --git a/guest/hals/camera/fake-pipeline2/JpegCompressor.cpp b/guest/hals/camera/fake-pipeline2/JpegCompressor.cpp
index f7ba234..f866435 100644
--- a/guest/hals/camera/fake-pipeline2/JpegCompressor.cpp
+++ b/guest/hals/camera/fake-pipeline2/JpegCompressor.cpp
@@ -19,277 +19,269 @@
 
 #include <utils/Log.h>
 
-#include "JpegCompressor.h"
 #include "../EmulatedFakeCamera2.h"
 #include "../EmulatedFakeCamera3.h"
+#include "JpegCompressor.h"
 
 namespace android {
 
-JpegCompressor::JpegCompressor():
-        Thread(false),
-        mIsBusy(false),
-        mSynchronous(false),
-        mBuffers(NULL),
-        mListener(NULL) {
-}
+JpegCompressor::JpegCompressor()
+    : Thread(false),
+      mIsBusy(false),
+      mSynchronous(false),
+      mBuffers(NULL),
+      mListener(NULL) {}
 
-JpegCompressor::~JpegCompressor() {
-    Mutex::Autolock lock(mMutex);
-}
+JpegCompressor::~JpegCompressor() { Mutex::Autolock lock(mMutex); }
 
 status_t JpegCompressor::reserve() {
-    Mutex::Autolock busyLock(mBusyMutex);
-    if (mIsBusy) {
-        ALOGE("%s: Already processing a buffer!", __FUNCTION__);
-        return INVALID_OPERATION;
-    }
-    mIsBusy = true;
-    return OK;
+  Mutex::Autolock busyLock(mBusyMutex);
+  if (mIsBusy) {
+    ALOGE("%s: Already processing a buffer!", __FUNCTION__);
+    return INVALID_OPERATION;
+  }
+  mIsBusy = true;
+  return OK;
 }
 
 status_t JpegCompressor::start(Buffers *buffers, JpegListener *listener) {
-    if (listener == NULL) {
-        ALOGE("%s: NULL listener not allowed!", __FUNCTION__);
-        return BAD_VALUE;
-    }
-    ALOGV("%s: Starting JPEG compression thread", __FUNCTION__);
-    Mutex::Autolock lock(mMutex);
-    {
-        Mutex::Autolock busyLock(mBusyMutex);
+  if (listener == NULL) {
+    ALOGE("%s: NULL listener not allowed!", __FUNCTION__);
+    return BAD_VALUE;
+  }
+  ALOGV("%s: Starting JPEG compression thread", __FUNCTION__);
+  Mutex::Autolock lock(mMutex);
+  {
+    Mutex::Autolock busyLock(mBusyMutex);
 
-        if (!mIsBusy) {
-            ALOGE("Called start without reserve() first!");
-            return INVALID_OPERATION;
-        }
-        mSynchronous = false;
-        mBuffers = buffers;
-        mListener = listener;
+    if (!mIsBusy) {
+      ALOGE("Called start without reserve() first!");
+      return INVALID_OPERATION;
     }
+    mSynchronous = false;
+    mBuffers = buffers;
+    mListener = listener;
+  }
 
-    status_t res;
-    res = run("EmulatedFakeCamera2::JpegCompressor");
-    if (res != OK) {
-        ALOGE("%s: Unable to start up compression thread: %s (%d)",
-                __FUNCTION__, strerror(-res), res);
-        delete mBuffers;
-    }
-    return res;
+  status_t res;
+  res = run("EmulatedFakeCamera2::JpegCompressor");
+  if (res != OK) {
+    ALOGE("%s: Unable to start up compression thread: %s (%d)", __FUNCTION__,
+          strerror(-res), res);
+    delete mBuffers;
+  }
+  return res;
 }
 
 status_t JpegCompressor::compressSynchronous(Buffers *buffers) {
-    status_t res;
+  status_t res;
 
-    Mutex::Autolock lock(mMutex);
-    {
-        Mutex::Autolock busyLock(mBusyMutex);
+  Mutex::Autolock lock(mMutex);
+  {
+    Mutex::Autolock busyLock(mBusyMutex);
 
-        if (mIsBusy) {
-            ALOGE("%s: Already processing a buffer!", __FUNCTION__);
-            return INVALID_OPERATION;
-        }
-
-        mIsBusy = true;
-        mSynchronous = true;
-        mBuffers = buffers;
+    if (mIsBusy) {
+      ALOGE("%s: Already processing a buffer!", __FUNCTION__);
+      return INVALID_OPERATION;
     }
 
-    res = compress();
+    mIsBusy = true;
+    mSynchronous = true;
+    mBuffers = buffers;
+  }
 
-    cleanUp();
+  res = compress();
 
-    return res;
+  cleanUp();
+
+  return res;
 }
 
 status_t JpegCompressor::cancel() {
-    requestExitAndWait();
-    return OK;
+  requestExitAndWait();
+  return OK;
 }
 
-status_t JpegCompressor::readyToRun() {
-    return OK;
-}
+status_t JpegCompressor::readyToRun() { return OK; }
 
 bool JpegCompressor::threadLoop() {
-    status_t res;
-    ALOGV("%s: Starting compression thread", __FUNCTION__);
+  status_t res;
+  ALOGV("%s: Starting compression thread", __FUNCTION__);
 
-    res = compress();
+  res = compress();
 
-    mListener->onJpegDone(mJpegBuffer, res == OK);
+  mListener->onJpegDone(mJpegBuffer, res == OK);
 
-    cleanUp();
+  cleanUp();
 
-    return false;
+  return false;
 }
 
 status_t JpegCompressor::compress() {
-    // Find source and target buffers. Assumes only one buffer matches
-    // each condition!
-    ALOGV("%s: Compressing start", __FUNCTION__);
-    bool foundJpeg = false, mFoundAux = false;
-    for (size_t i = 0; i < mBuffers->size(); i++) {
-        const StreamBuffer &b = (*mBuffers)[i];
-        if (b.format == HAL_PIXEL_FORMAT_BLOB) {
-            mJpegBuffer = b;
-            mFoundJpeg = true;
-        } else if (b.streamId <= 0) {
-            mAuxBuffer = b;
-            mFoundAux = true;
-        }
-        if (mFoundJpeg && mFoundAux) break;
+  // Find source and target buffers. Assumes only one buffer matches
+  // each condition!
+  ALOGV("%s: Compressing start", __FUNCTION__);
+  // Note: mFoundJpeg and mFoundAux are member fields (cleanUp() reads them
+  // later), so they must be reset here rather than shadowed by locals.
+  mFoundJpeg = false;
+  mFoundAux = false;
+  for (size_t i = 0; i < mBuffers->size(); i++) {
+    const StreamBuffer &b = (*mBuffers)[i];
+    if (b.format == HAL_PIXEL_FORMAT_BLOB) {
+      mJpegBuffer = b;
+      mFoundJpeg = true;
+    } else if (b.streamId <= 0) {
+      mAuxBuffer = b;
+      mFoundAux = true;
     }
-    if (!mFoundJpeg || !mFoundAux) {
-        ALOGE("%s: Unable to find buffers for JPEG source/destination",
-                __FUNCTION__);
-        return BAD_VALUE;
+    if (mFoundJpeg && mFoundAux) break;
+  }
+  if (!mFoundJpeg || !mFoundAux) {
+    ALOGE("%s: Unable to find buffers for JPEG source/destination",
+          __FUNCTION__);
+    return BAD_VALUE;
+  }
+
+  // Set up error management
+
+  mJpegErrorInfo = NULL;
+  JpegError error;
+  error.parent = this;
+
+  mCInfo.err = jpeg_std_error(&error);
+  mCInfo.err->error_exit = jpegErrorHandler;
+
+  jpeg_create_compress(&mCInfo);
+  if (checkError("Error initializing compression")) return NO_INIT;
+
+  // Route compressed data straight to output stream buffer
+
+  JpegDestination jpegDestMgr;
+  jpegDestMgr.parent = this;
+  jpegDestMgr.init_destination = jpegInitDestination;
+  jpegDestMgr.empty_output_buffer = jpegEmptyOutputBuffer;
+  jpegDestMgr.term_destination = jpegTermDestination;
+
+  mCInfo.dest = &jpegDestMgr;
+
+  // Set up compression parameters
+
+  mCInfo.image_width = mAuxBuffer.width;
+  mCInfo.image_height = mAuxBuffer.height;
+  mCInfo.input_components = 3;
+  mCInfo.in_color_space = JCS_RGB;
+
+  jpeg_set_defaults(&mCInfo);
+  if (checkError("Error configuring defaults")) return NO_INIT;
+
+  // Do compression
+
+  jpeg_start_compress(&mCInfo, TRUE);
+  if (checkError("Error starting compression")) return NO_INIT;
+
+  size_t rowStride = mAuxBuffer.stride * 3;
+  const size_t kChunkSize = 32;
+  while (mCInfo.next_scanline < mCInfo.image_height) {
+    JSAMPROW chunk[kChunkSize];
+    for (size_t i = 0; i < kChunkSize; i++) {
+      chunk[i] =
+          (JSAMPROW)(mAuxBuffer.img + (i + mCInfo.next_scanline) * rowStride);
     }
-
-    // Set up error management
-
-    mJpegErrorInfo = NULL;
-    JpegError error;
-    error.parent = this;
-
-    mCInfo.err = jpeg_std_error(&error);
-    mCInfo.err->error_exit = jpegErrorHandler;
-
-    jpeg_create_compress(&mCInfo);
-    if (checkError("Error initializing compression")) return NO_INIT;
-
-    // Route compressed data straight to output stream buffer
-
-    JpegDestination jpegDestMgr;
-    jpegDestMgr.parent = this;
-    jpegDestMgr.init_destination = jpegInitDestination;
-    jpegDestMgr.empty_output_buffer = jpegEmptyOutputBuffer;
-    jpegDestMgr.term_destination = jpegTermDestination;
-
-    mCInfo.dest = &jpegDestMgr;
-
-    // Set up compression parameters
-
-    mCInfo.image_width = mAuxBuffer.width;
-    mCInfo.image_height = mAuxBuffer.height;
-    mCInfo.input_components = 3;
-    mCInfo.in_color_space = JCS_RGB;
-
-    jpeg_set_defaults(&mCInfo);
-    if (checkError("Error configuring defaults")) return NO_INIT;
-
-    // Do compression
-
-    jpeg_start_compress(&mCInfo, TRUE);
-    if (checkError("Error starting compression")) return NO_INIT;
-
-    size_t rowStride = mAuxBuffer.stride * 3;
-    const size_t kChunkSize = 32;
-    while (mCInfo.next_scanline < mCInfo.image_height) {
-        JSAMPROW chunk[kChunkSize];
-        for (size_t i = 0 ; i < kChunkSize; i++) {
-            chunk[i] = (JSAMPROW)
-                    (mAuxBuffer.img + (i + mCInfo.next_scanline) * rowStride);
-        }
-        jpeg_write_scanlines(&mCInfo, chunk, kChunkSize);
-        if (checkError("Error while compressing")) return NO_INIT;
-        if (exitPending()) {
-            ALOGV("%s: Cancel called, exiting early", __FUNCTION__);
-            return TIMED_OUT;
-        }
+    jpeg_write_scanlines(&mCInfo, chunk, kChunkSize);
+    if (checkError("Error while compressing")) return NO_INIT;
+    if (exitPending()) {
+      ALOGV("%s: Cancel called, exiting early", __FUNCTION__);
+      return TIMED_OUT;
     }
+  }
 
-    jpeg_finish_compress(&mCInfo);
-    if (checkError("Error while finishing compression")) return NO_INIT;
+  jpeg_finish_compress(&mCInfo);
+  if (checkError("Error while finishing compression")) return NO_INIT;
 
-    // All done
-    ALOGV("%s: Compressing done", __FUNCTION__);
+  // All done
+  ALOGV("%s: Compressing done", __FUNCTION__);
 
-    return OK;
+  return OK;
 }
 
 bool JpegCompressor::isBusy() {
-    Mutex::Autolock busyLock(mBusyMutex);
-    return mIsBusy;
+  Mutex::Autolock busyLock(mBusyMutex);
+  return mIsBusy;
 }
 
 bool JpegCompressor::isStreamInUse(uint32_t id) {
-    Mutex::Autolock lock(mBusyMutex);
+  Mutex::Autolock lock(mBusyMutex);
 
-    if (mBuffers && mIsBusy) {
-        for (size_t i = 0; i < mBuffers->size(); i++) {
-            if ( (*mBuffers)[i].streamId == (int)id ) return true;
-        }
+  if (mBuffers && mIsBusy) {
+    for (size_t i = 0; i < mBuffers->size(); i++) {
+      if ((*mBuffers)[i].streamId == (int)id) return true;
     }
-    return false;
+  }
+  return false;
 }
 
 bool JpegCompressor::waitForDone(nsecs_t timeout) {
-    Mutex::Autolock lock(mBusyMutex);
-    while (mIsBusy) {
-        status_t res = mDone.waitRelative(mBusyMutex, timeout);
-        if (res != OK) return false;
-    }
-    return true;
+  Mutex::Autolock lock(mBusyMutex);
+  while (mIsBusy) {
+    status_t res = mDone.waitRelative(mBusyMutex, timeout);
+    if (res != OK) return false;
+  }
+  return true;
 }
 
 bool JpegCompressor::checkError(const char *msg) {
-    if (mJpegErrorInfo) {
-        char errBuffer[JMSG_LENGTH_MAX];
-        mJpegErrorInfo->err->format_message(mJpegErrorInfo, errBuffer);
-        ALOGE("%s: %s: %s",
-                __FUNCTION__, msg, errBuffer);
-        mJpegErrorInfo = NULL;
-        return true;
-    }
-    return false;
+  if (mJpegErrorInfo) {
+    char errBuffer[JMSG_LENGTH_MAX];
+    mJpegErrorInfo->err->format_message(mJpegErrorInfo, errBuffer);
+    ALOGE("%s: %s: %s", __FUNCTION__, msg, errBuffer);
+    mJpegErrorInfo = NULL;
+    return true;
+  }
+  return false;
 }
 
 void JpegCompressor::cleanUp() {
-    status_t res;
-    jpeg_destroy_compress(&mCInfo);
-    Mutex::Autolock lock(mBusyMutex);
+  status_t res;
+  jpeg_destroy_compress(&mCInfo);
+  Mutex::Autolock lock(mBusyMutex);
 
-    if (mFoundAux) {
-        if (mAuxBuffer.streamId == 0) {
-            delete[] mAuxBuffer.img;
-        } else if (!mSynchronous) {
-            mListener->onJpegInputDone(mAuxBuffer);
-        }
+  if (mFoundAux) {
+    if (mAuxBuffer.streamId == 0) {
+      delete[] mAuxBuffer.img;
+    } else if (!mSynchronous) {
+      mListener->onJpegInputDone(mAuxBuffer);
     }
-    if (!mSynchronous) {
-        delete mBuffers;
-    }
+  }
+  if (!mSynchronous) {
+    delete mBuffers;
+  }
 
-    mBuffers = NULL;
+  mBuffers = NULL;
 
-    mIsBusy = false;
-    mDone.signal();
+  mIsBusy = false;
+  mDone.signal();
 }
 
 void JpegCompressor::jpegErrorHandler(j_common_ptr cinfo) {
-    JpegError *error = static_cast<JpegError*>(cinfo->err);
-    error->parent->mJpegErrorInfo = cinfo;
+  JpegError *error = static_cast<JpegError *>(cinfo->err);
+  error->parent->mJpegErrorInfo = cinfo;
 }
 
 void JpegCompressor::jpegInitDestination(j_compress_ptr cinfo) {
-    JpegDestination *dest= static_cast<JpegDestination*>(cinfo->dest);
-    ALOGV("%s: Setting destination to %p, size %zu",
-            __FUNCTION__, dest->parent->mJpegBuffer.img, kMaxJpegSize);
-    dest->next_output_byte = (JOCTET*)(dest->parent->mJpegBuffer.img);
-    dest->free_in_buffer = kMaxJpegSize;
+  JpegDestination *dest = static_cast<JpegDestination *>(cinfo->dest);
+  ALOGV("%s: Setting destination to %p, size %zu", __FUNCTION__,
+        dest->parent->mJpegBuffer.img, kMaxJpegSize);
+  dest->next_output_byte = (JOCTET *)(dest->parent->mJpegBuffer.img);
+  dest->free_in_buffer = kMaxJpegSize;
 }
 
 boolean JpegCompressor::jpegEmptyOutputBuffer(j_compress_ptr cinfo) {
-    ALOGE("%s: JPEG destination buffer overflow!",
-            __FUNCTION__);
-    return true;
+  ALOGE("%s: JPEG destination buffer overflow!", __FUNCTION__);
+  return true;
 }
 
 void JpegCompressor::jpegTermDestination(j_compress_ptr cinfo) {
-    ALOGV("%s: Done writing JPEG data. %zu bytes left in buffer",
-            __FUNCTION__, cinfo->dest->free_in_buffer);
+  ALOGV("%s: Done writing JPEG data. %zu bytes left in buffer", __FUNCTION__,
+        cinfo->dest->free_in_buffer);
 }
 
-JpegCompressor::JpegListener::~JpegListener() {
-}
+JpegCompressor::JpegListener::~JpegListener() {}
 
-} // namespace android
+}  // namespace android
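
For readers tracing the callback plumbing above, here is a minimal standalone sketch of the same pattern, a fixed in-memory destination manager feeding libjpeg. The names kOutSize, gOut, and CompressRgbToJpeg are illustrative and not part of the HAL:

    #include <cstddef>
    #include <cstdio>  // jpeglib.h expects stdio types to be declared first
    #include <jpeglib.h>

    static const size_t kOutSize = 1 << 20;  // illustrative 1 MiB ceiling
    static unsigned char gOut[kOutSize];

    static void initDest(j_compress_ptr cinfo) {
      cinfo->dest->next_output_byte = gOut;  // compressed bytes land here
      cinfo->dest->free_in_buffer = kOutSize;
    }
    static boolean emptyBuf(j_compress_ptr /*cinfo*/) {
      return TRUE;  // buffer overflow; real code would grow the buffer or fail
    }
    static void termDest(j_compress_ptr /*cinfo*/) {}

    // Compress a tightly packed RGB image into gOut; returns bytes written.
    size_t CompressRgbToJpeg(const unsigned char *rgb, int width, int height) {
      jpeg_compress_struct cinfo;
      jpeg_error_mgr jerr;
      cinfo.err = jpeg_std_error(&jerr);  // default error_exit calls exit()
      jpeg_create_compress(&cinfo);

      jpeg_destination_mgr dest = {};
      dest.init_destination = initDest;
      dest.empty_output_buffer = emptyBuf;
      dest.term_destination = termDest;
      cinfo.dest = &dest;

      cinfo.image_width = width;
      cinfo.image_height = height;
      cinfo.input_components = 3;
      cinfo.in_color_space = JCS_RGB;
      jpeg_set_defaults(&cinfo);

      jpeg_start_compress(&cinfo, TRUE);
      while (cinfo.next_scanline < cinfo.image_height) {
        JSAMPROW row =
            const_cast<unsigned char *>(rgb) + cinfo.next_scanline * width * 3;
        jpeg_write_scanlines(&cinfo, &row, 1);
      }
      jpeg_finish_compress(&cinfo);
      size_t written = kOutSize - dest.free_in_buffer;
      jpeg_destroy_compress(&cinfo);
      return written;
    }

Note that the stock jpeg_std_error handler aborts the process on failure; the HAL code above instead installs jpegErrorHandler, so errors surface as status codes via checkError().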
diff --git a/guest/hals/camera/fake-pipeline2/JpegCompressor.h b/guest/hals/camera/fake-pipeline2/JpegCompressor.h
index 597cbdf..bdbc772 100644
--- a/guest/hals/camera/fake-pipeline2/JpegCompressor.h
+++ b/guest/hals/camera/fake-pipeline2/JpegCompressor.h
@@ -14,7 +14,6 @@
  * limitations under the License.
  */
 
-
 /**
  * This class simulates a hardware JPEG compressor.  It receives image buffers
  * in RGBA_8888 format, processes them in a worker thread, and then pushes them
@@ -24,8 +23,8 @@
 #ifndef HW_EMULATOR_CAMERA2_JPEG_H
 #define HW_EMULATOR_CAMERA2_JPEG_H
 
-#include "utils/Thread.h"
 #include "utils/Mutex.h"
+#include "utils/Thread.h"
 #include "utils/Timers.h"
 
 #include "Base.h"
@@ -38,87 +37,85 @@
 
 namespace android {
 
-class JpegCompressor: private Thread, public virtual RefBase {
-  public:
+class JpegCompressor : private Thread, public virtual RefBase {
+ public:
+  JpegCompressor();
+  ~JpegCompressor();
 
-    JpegCompressor();
-    ~JpegCompressor();
+  struct JpegListener {
+    // Called when JPEG compression has finished, or encountered an error
+    virtual void onJpegDone(const StreamBuffer &jpegBuffer, bool success) = 0;
+    // Called when the input buffer for JPEG is not needed any more,
+    // if the buffer came from the framework.
+    virtual void onJpegInputDone(const StreamBuffer &inputBuffer) = 0;
+    virtual ~JpegListener();
+  };
 
-    struct JpegListener {
-        // Called when JPEG compression has finished, or encountered an error
-        virtual void onJpegDone(const StreamBuffer &jpegBuffer,
-                bool success) = 0;
-        // Called when the input buffer for JPEG is not needed any more,
-        // if the buffer came from the framework.
-        virtual void onJpegInputDone(const StreamBuffer &inputBuffer) = 0;
-        virtual ~JpegListener();
-    };
+  // Start compressing COMPRESSED format buffers; JpegCompressor takes
+  // ownership of the Buffers vector.
+  // Reserve() must be called first.
+  status_t start(Buffers *buffers, JpegListener *listener);
 
-    // Start compressing COMPRESSED format buffers; JpegCompressor takes
-    // ownership of the Buffers vector.
-    // Reserve() must be called first.
-    status_t start(Buffers *buffers, JpegListener *listener);
+  // Compress and block until buffer is complete.
+  status_t compressSynchronous(Buffers *buffers);
 
-    // Compress and block until buffer is complete.
-    status_t compressSynchronous(Buffers *buffers);
+  status_t cancel();
 
-    status_t cancel();
+  bool isBusy();
+  bool isStreamInUse(uint32_t id);
 
-    bool isBusy();
-    bool isStreamInUse(uint32_t id);
+  bool waitForDone(nsecs_t timeout);
 
-    bool waitForDone(nsecs_t timeout);
+  // Reserve the compressor for a later start() call.
+  status_t reserve();
 
-    // Reserve the compressor for a later start() call.
-    status_t reserve();
+  // TODO: Measure this
+  static const size_t kMaxJpegSize = 300000;
 
-    // TODO: Measure this
-    static const size_t kMaxJpegSize = 300000;
+ private:
+  Mutex mBusyMutex;
+  bool mIsBusy;
+  Condition mDone;
+  bool mSynchronous;
 
-  private:
-    Mutex mBusyMutex;
-    bool mIsBusy;
-    Condition mDone;
-    bool mSynchronous;
+  Mutex mMutex;
 
-    Mutex mMutex;
+  Buffers *mBuffers;
+  JpegListener *mListener;
 
-    Buffers *mBuffers;
-    JpegListener *mListener;
+  StreamBuffer mJpegBuffer, mAuxBuffer;
+  bool mFoundJpeg, mFoundAux;
 
-    StreamBuffer mJpegBuffer, mAuxBuffer;
-    bool mFoundJpeg, mFoundAux;
+  jpeg_compress_struct mCInfo;
 
-    jpeg_compress_struct mCInfo;
+  struct JpegError : public jpeg_error_mgr {
+    JpegCompressor *parent;
+  };
+  j_common_ptr mJpegErrorInfo;
 
-    struct JpegError : public jpeg_error_mgr {
-        JpegCompressor *parent;
-    };
-    j_common_ptr mJpegErrorInfo;
+  struct JpegDestination : public jpeg_destination_mgr {
+    JpegCompressor *parent;
+  };
 
-    struct JpegDestination : public jpeg_destination_mgr {
-        JpegCompressor *parent;
-    };
+  static void jpegErrorHandler(j_common_ptr cinfo);
 
-    static void jpegErrorHandler(j_common_ptr cinfo);
+  static void jpegInitDestination(j_compress_ptr cinfo);
+  static boolean jpegEmptyOutputBuffer(j_compress_ptr cinfo);
+  static void jpegTermDestination(j_compress_ptr cinfo);
 
-    static void jpegInitDestination(j_compress_ptr cinfo);
-    static boolean jpegEmptyOutputBuffer(j_compress_ptr cinfo);
-    static void jpegTermDestination(j_compress_ptr cinfo);
+  bool checkError(const char *msg);
+  status_t compress();
 
-    bool checkError(const char *msg);
-    status_t compress();
+  void cleanUp();
 
-    void cleanUp();
-
-    /**
-     * Inherited Thread virtual overrides
-     */
-  private:
-    virtual status_t readyToRun();
-    virtual bool threadLoop();
+  /**
+   * Inherited Thread virtual overrides
+   */
+ private:
+  virtual status_t readyToRun();
+  virtual bool threadLoop();
 };
 
-} // namespace android
+}  // namespace android
 
 #endif
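
As a usage sketch of the asynchronous path declared above (assuming the surrounding HAL headers are on the include path; LoggingListener and enqueueJpeg are illustrative names, and error handling is abbreviated):

    #include <utils/Log.h>
    #include "JpegCompressor.h"

    using namespace android;

    // Illustrative listener: log completion and hand input buffers back.
    struct LoggingListener : public JpegCompressor::JpegListener {
      void onJpegDone(const StreamBuffer &jpegBuffer, bool success) {
        ALOGV("JPEG done, success=%d", success);
      }
      void onJpegInputDone(const StreamBuffer &inputBuffer) {
        // Return the framework-owned input buffer to its stream here.
      }
    };

    // reserve() must succeed before start(); start() takes ownership of
    // the Buffers vector.
    status_t enqueueJpeg(const sp<JpegCompressor> &compressor, Buffers *buffers,
                         LoggingListener *listener) {
      status_t res = compressor->reserve();
      if (res != OK) return res;
      return compressor->start(buffers, listener);
    }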
diff --git a/guest/hals/camera/fake-pipeline2/Scene.cpp b/guest/hals/camera/fake-pipeline2/Scene.cpp
index 48296d2..c70dc4c 100644
--- a/guest/hals/camera/fake-pipeline2/Scene.cpp
+++ b/guest/hals/camera/fake-pipeline2/Scene.cpp
@@ -16,10 +16,10 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "EmulatedCamera_Scene"
-#include <utils/Log.h>
-#include <stdlib.h>
-#include <cmath>
 #include "Scene.h"
+#include <stdlib.h>
+#include <utils/Log.h>
+#include <cmath>
 
 // TODO: This should probably be done host-side in OpenGL for speed and better
 // quality
@@ -45,26 +45,26 @@
 
 const uint8_t Scene::kScene[Scene::kSceneWidth * Scene::kSceneHeight] = {
     //      5         10        15        20
-    K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,
-    K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,
-    K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,
-    K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,
-    K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K, // 5
-    K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,K,
-    K,K,K,K,K,K,K,K,H,H,H,H,H,H,H,H,H,H,H,H,
-    K,K,K,K,K,K,K,K,H,H,H,H,H,H,H,C,C,H,H,H,
-    K,K,K,K,K,K,H,H,H,H,H,H,H,H,H,C,C,H,H,H,
-    H,K,K,K,K,K,H,R,R,R,R,R,R,R,R,R,R,R,R,H, // 10
-    H,K,K,K,K,H,H,R,R,R,R,R,R,R,R,R,R,R,R,H,
-    H,H,H,K,K,H,H,R,R,R,R,R,R,R,R,R,R,R,R,H,
-    H,H,H,K,K,H,H,H,W,W,W,W,W,W,W,W,W,W,H,H,
-    S,S,S,G,G,S,S,S,W,W,W,W,W,W,W,W,W,W,S,S,
-    S,G,G,G,G,S,S,S,W,I,I,W,D,D,W,I,I,W,S,S, // 15
-    G,G,G,G,G,G,S,S,W,I,I,W,D,D,W,I,I,W,S,S,
-    G,G,G,G,G,G,G,G,W,W,W,W,D,D,W,W,W,W,G,G,
-    G,G,G,G,G,G,G,G,W,W,W,W,D,D,W,W,W,W,G,G,
-    G,G,G,G,G,G,G,G,S,S,S,S,S,S,S,S,S,S,G,G,
-    G,G,G,G,G,G,G,G,S,S,S,S,S,S,S,S,S,S,G,G, // 20
+    K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K,
+    K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K,
+    K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K,
+    K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K,
+    K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K,  // 5
+    K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K, K,
+    K, K, K, K, K, K, K, K, H, H, H, H, H, H, H, H, H, H, H, H,
+    K, K, K, K, K, K, K, K, H, H, H, H, H, H, H, C, C, H, H, H,
+    K, K, K, K, K, K, H, H, H, H, H, H, H, H, H, C, C, H, H, H,
+    H, K, K, K, K, K, H, R, R, R, R, R, R, R, R, R, R, R, R, H,  // 10
+    H, K, K, K, K, H, H, R, R, R, R, R, R, R, R, R, R, R, R, H,
+    H, H, H, K, K, H, H, R, R, R, R, R, R, R, R, R, R, R, R, H,
+    H, H, H, K, K, H, H, H, W, W, W, W, W, W, W, W, W, W, H, H,
+    S, S, S, G, G, S, S, S, W, W, W, W, W, W, W, W, W, W, S, S,
+    S, G, G, G, G, S, S, S, W, I, I, W, D, D, W, I, I, W, S, S,  // 15
+    G, G, G, G, G, G, S, S, W, I, I, W, D, D, W, I, I, W, S, S,
+    G, G, G, G, G, G, G, G, W, W, W, W, D, D, W, W, W, W, G, G,
+    G, G, G, G, G, G, G, G, W, W, W, W, D, D, W, W, W, W, G, G,
+    G, G, G, G, G, G, G, G, S, S, S, S, S, S, S, S, S, S, G, G,
+    G, G, G, G, G, G, G, G, S, S, S, S, S, S, S, S, S, S, G, G,  // 20
     //      5         10        15        20
 };
 
@@ -80,399 +80,357 @@
 #undef K
 #undef M
 
-Scene::Scene(
-    int sensorWidthPx,
-    int sensorHeightPx,
-    float sensorSensitivity):
-        mSensorWidth(sensorWidthPx),
-        mSensorHeight(sensorHeightPx),
-        mHour(12),
-        mExposureDuration(0.033f),
-        mSensorSensitivity(sensorSensitivity)
-{
-    // Map scene to sensor pixels
-    if (mSensorWidth > mSensorHeight) {
-        mMapDiv = (mSensorWidth / (kSceneWidth + 1) ) + 1;
-    } else {
-        mMapDiv = (mSensorHeight / (kSceneHeight + 1) ) + 1;
-    }
-    mOffsetX = (kSceneWidth * mMapDiv - mSensorWidth) / 2;
-    mOffsetY = (kSceneHeight * mMapDiv - mSensorHeight) / 2;
+Scene::Scene(int sensorWidthPx, int sensorHeightPx, float sensorSensitivity)
+    : mSensorWidth(sensorWidthPx),
+      mSensorHeight(sensorHeightPx),
+      mHour(12),
+      mExposureDuration(0.033f),
+      mSensorSensitivity(sensorSensitivity) {
+  // Map scene to sensor pixels
+  if (mSensorWidth > mSensorHeight) {
+    mMapDiv = (mSensorWidth / (kSceneWidth + 1)) + 1;
+  } else {
+    mMapDiv = (mSensorHeight / (kSceneHeight + 1)) + 1;
+  }
+  mOffsetX = (kSceneWidth * mMapDiv - mSensorWidth) / 2;
+  mOffsetY = (kSceneHeight * mMapDiv - mSensorHeight) / 2;
 
-    // Assume that sensor filters are sRGB primaries to start
-    mFilterR[0]  =  3.2406f; mFilterR[1]  = -1.5372f; mFilterR[2]  = -0.4986f;
-    mFilterGr[0] = -0.9689f; mFilterGr[1] =  1.8758f; mFilterGr[2] =  0.0415f;
-    mFilterGb[0] = -0.9689f; mFilterGb[1] =  1.8758f; mFilterGb[2] =  0.0415f;
-    mFilterB[0]  =  0.0557f; mFilterB[1]  = -0.2040f; mFilterB[2]  =  1.0570f;
-
-
+  // Assume that sensor filters are sRGB primaries to start
+  mFilterR[0] = 3.2406f;
+  mFilterR[1] = -1.5372f;
+  mFilterR[2] = -0.4986f;
+  mFilterGr[0] = -0.9689f;
+  mFilterGr[1] = 1.8758f;
+  mFilterGr[2] = 0.0415f;
+  mFilterGb[0] = -0.9689f;
+  mFilterGb[1] = 1.8758f;
+  mFilterGb[2] = 0.0415f;
+  mFilterB[0] = 0.0557f;
+  mFilterB[1] = -0.2040f;
+  mFilterB[2] = 1.0570f;
 }
 
-Scene::~Scene() {
-}
+Scene::~Scene() {}
 
-void Scene::setColorFilterXYZ(
-        float rX, float rY, float rZ,
-        float grX, float grY, float grZ,
-        float gbX, float gbY, float gbZ,
-        float bX, float bY, float bZ) {
-    mFilterR[0]  = rX;  mFilterR[1]  = rY;  mFilterR[2]  = rZ;
-    mFilterGr[0] = grX; mFilterGr[1] = grY; mFilterGr[2] = grZ;
-    mFilterGb[0] = gbX; mFilterGb[1] = gbY; mFilterGb[2] = gbZ;
-    mFilterB[0]  = bX;  mFilterB[1]  = bY;  mFilterB[2]  = bZ;
+void Scene::setColorFilterXYZ(float rX, float rY, float rZ, float grX,
+                              float grY, float grZ, float gbX, float gbY,
+                              float gbZ, float bX, float bY, float bZ) {
+  mFilterR[0] = rX;
+  mFilterR[1] = rY;
+  mFilterR[2] = rZ;
+  mFilterGr[0] = grX;
+  mFilterGr[1] = grY;
+  mFilterGr[2] = grZ;
+  mFilterGb[0] = gbX;
+  mFilterGb[1] = gbY;
+  mFilterGb[2] = gbZ;
+  mFilterB[0] = bX;
+  mFilterB[1] = bY;
+  mFilterB[2] = bZ;
 }
 
 void Scene::setHour(int hour) {
-    ALOGV("Hour set to: %d", hour);
-    mHour = hour % 24;
+  ALOGV("Hour set to: %d", hour);
+  mHour = hour % 24;
 }
 
-int Scene::getHour() {
-    return mHour;
-}
+int Scene::getHour() { return mHour; }
 
-void Scene::setExposureDuration(float seconds) {
-    mExposureDuration = seconds;
-}
+void Scene::setExposureDuration(float seconds) { mExposureDuration = seconds; }
 
 void Scene::calculateScene(nsecs_t time) {
-    // Calculate time fractions for interpolation
-    int timeIdx = mHour / kTimeStep;
-    int nextTimeIdx = (timeIdx + 1) % (24 / kTimeStep);
-    const nsecs_t kOneHourInNsec = 1e9 * 60 * 60;
-    nsecs_t timeSinceIdx = (mHour - timeIdx * kTimeStep) * kOneHourInNsec + time;
-    float timeFrac = timeSinceIdx / (float)(kOneHourInNsec * kTimeStep);
+  // Calculate time fractions for interpolation
+  int timeIdx = mHour / kTimeStep;
+  int nextTimeIdx = (timeIdx + 1) % (24 / kTimeStep);
+  const nsecs_t kOneHourInNsec = 1e9 * 60 * 60;
+  nsecs_t timeSinceIdx = (mHour - timeIdx * kTimeStep) * kOneHourInNsec + time;
+  float timeFrac = timeSinceIdx / (float)(kOneHourInNsec * kTimeStep);
 
-    // Determine overall sunlight levels
-    float sunLux =
-            kSunlight[timeIdx] * (1 - timeFrac) +
-            kSunlight[nextTimeIdx] * timeFrac;
-    ALOGV("Sun lux: %f", sunLux);
+  // Determine overall sunlight levels
+  float sunLux =
+      kSunlight[timeIdx] * (1 - timeFrac) + kSunlight[nextTimeIdx] * timeFrac;
+  ALOGV("Sun lux: %f", sunLux);
 
-    float sunShadeLux = sunLux * (kDaylightShadeIllum / kDirectSunIllum);
+  float sunShadeLux = sunLux * (kDaylightShadeIllum / kDirectSunIllum);
 
-    // Determine sun/shade illumination chromaticity
-    float currentSunXY[2];
-    float currentShadeXY[2];
+  // Determine sun/shade illumination chromaticity
+  float currentSunXY[2];
+  float currentShadeXY[2];
 
-    const float *prevSunXY, *nextSunXY;
-    const float *prevShadeXY, *nextShadeXY;
-    if (kSunlight[timeIdx] == kSunsetIllum ||
-            kSunlight[timeIdx] == kTwilightIllum) {
-        prevSunXY = kSunsetXY;
-        prevShadeXY = kSunsetXY;
-    } else {
-        prevSunXY = kDirectSunlightXY;
-        prevShadeXY = kDaylightXY;
-    }
-    if (kSunlight[nextTimeIdx] == kSunsetIllum ||
-            kSunlight[nextTimeIdx] == kTwilightIllum) {
-        nextSunXY = kSunsetXY;
-        nextShadeXY = kSunsetXY;
-    } else {
-        nextSunXY = kDirectSunlightXY;
-        nextShadeXY = kDaylightXY;
-    }
-    currentSunXY[0] = prevSunXY[0] * (1 - timeFrac) +
-            nextSunXY[0] * timeFrac;
-    currentSunXY[1] = prevSunXY[1] * (1 - timeFrac) +
-            nextSunXY[1] * timeFrac;
+  const float *prevSunXY, *nextSunXY;
+  const float *prevShadeXY, *nextShadeXY;
+  if (kSunlight[timeIdx] == kSunsetIllum ||
+      kSunlight[timeIdx] == kTwilightIllum) {
+    prevSunXY = kSunsetXY;
+    prevShadeXY = kSunsetXY;
+  } else {
+    prevSunXY = kDirectSunlightXY;
+    prevShadeXY = kDaylightXY;
+  }
+  if (kSunlight[nextTimeIdx] == kSunsetIllum ||
+      kSunlight[nextTimeIdx] == kTwilightIllum) {
+    nextSunXY = kSunsetXY;
+    nextShadeXY = kSunsetXY;
+  } else {
+    nextSunXY = kDirectSunlightXY;
+    nextShadeXY = kDaylightXY;
+  }
+  currentSunXY[0] = prevSunXY[0] * (1 - timeFrac) + nextSunXY[0] * timeFrac;
+  currentSunXY[1] = prevSunXY[1] * (1 - timeFrac) + nextSunXY[1] * timeFrac;
 
-    currentShadeXY[0] = prevShadeXY[0] * (1 - timeFrac) +
-            nextShadeXY[0] * timeFrac;
-    currentShadeXY[1] = prevShadeXY[1] * (1 - timeFrac) +
-            nextShadeXY[1] * timeFrac;
+  currentShadeXY[0] =
+      prevShadeXY[0] * (1 - timeFrac) + nextShadeXY[0] * timeFrac;
+  currentShadeXY[1] =
+      prevShadeXY[1] * (1 - timeFrac) + nextShadeXY[1] * timeFrac;
 
-    ALOGV("Sun XY: %f, %f, Shade XY: %f, %f",
-            currentSunXY[0], currentSunXY[1],
-            currentShadeXY[0], currentShadeXY[1]);
+  ALOGV("Sun XY: %f, %f, Shade XY: %f, %f", currentSunXY[0], currentSunXY[1],
+        currentShadeXY[0], currentShadeXY[1]);
 
+  // Converting from xyY to XYZ:
+  // X = Y / y * x
+  // Y = Y
+  // Z = Y / y * (1 - x - y);
+  float sunXYZ[3] = {
+      sunLux / currentSunXY[1] * currentSunXY[0], sunLux,
+      sunLux / currentSunXY[1] * (1 - currentSunXY[0] - currentSunXY[1])};
+  float sunShadeXYZ[3] = {sunShadeLux / currentShadeXY[1] * currentShadeXY[0],
+                          sunShadeLux,
+                          sunShadeLux / currentShadeXY[1] *
+                              (1 - currentShadeXY[0] - currentShadeXY[1])};
+  ALOGV("Sun XYZ: %f, %f, %f", sunXYZ[0], sunXYZ[1], sunXYZ[2]);
+  ALOGV("Sun shade XYZ: %f, %f, %f", sunShadeXYZ[0], sunShadeXYZ[1],
+        sunShadeXYZ[2]);
+
+  // Determine moonlight levels
+  float moonLux =
+      kMoonlight[timeIdx] * (1 - timeFrac) + kMoonlight[nextTimeIdx] * timeFrac;
+  float moonShadeLux = moonLux * (kDaylightShadeIllum / kDirectSunIllum);
+
+  float moonXYZ[3] = {
+      moonLux / kMoonlightXY[1] * kMoonlightXY[0], moonLux,
+      moonLux / kMoonlightXY[1] * (1 - kMoonlightXY[0] - kMoonlightXY[1])};
+  float moonShadeXYZ[3] = {
+      moonShadeLux / kMoonlightXY[1] * kMoonlightXY[0], moonShadeLux,
+      moonShadeLux / kMoonlightXY[1] * (1 - kMoonlightXY[0] - kMoonlightXY[1])};
+
+  // Determine starlight level
+  const float kClearNightXYZ[3] = {
+      kClearNightIllum / kMoonlightXY[1] * kMoonlightXY[0], kClearNightIllum,
+      kClearNightIllum / kMoonlightXY[1] *
+          (1 - kMoonlightXY[0] - kMoonlightXY[1])};
+
+  // Calculate direct and shaded light
+  float directIllumXYZ[3] = {
+      sunXYZ[0] + moonXYZ[0] + kClearNightXYZ[0],
+      sunXYZ[1] + moonXYZ[1] + kClearNightXYZ[1],
+      sunXYZ[2] + moonXYZ[2] + kClearNightXYZ[2],
+  };
+
+  float shadeIllumXYZ[3] = {kClearNightXYZ[0], kClearNightXYZ[1],
+                            kClearNightXYZ[2]};
+
+  shadeIllumXYZ[0] += (mHour < kSunOverhead) ? sunXYZ[0] : sunShadeXYZ[0];
+  shadeIllumXYZ[1] += (mHour < kSunOverhead) ? sunXYZ[1] : sunShadeXYZ[1];
+  shadeIllumXYZ[2] += (mHour < kSunOverhead) ? sunXYZ[2] : sunShadeXYZ[2];
+
+  // Moon up period covers 23->0 transition, shift for simplicity
+  int adjHour = (mHour + 12) % 24;
+  int adjMoonOverhead = (kMoonOverhead + 12) % 24;
+  shadeIllumXYZ[0] +=
+      (adjHour < adjMoonOverhead) ? moonXYZ[0] : moonShadeXYZ[0];
+  shadeIllumXYZ[1] +=
+      (adjHour < adjMoonOverhead) ? moonXYZ[1] : moonShadeXYZ[1];
+  shadeIllumXYZ[2] +=
+      (adjHour < adjMoonOverhead) ? moonXYZ[2] : moonShadeXYZ[2];
+
+  ALOGV("Direct XYZ: %f, %f, %f", directIllumXYZ[0], directIllumXYZ[1],
+        directIllumXYZ[2]);
+  ALOGV("Shade XYZ: %f, %f, %f", shadeIllumXYZ[0], shadeIllumXYZ[1],
+        shadeIllumXYZ[2]);
+
+  for (int i = 0; i < NUM_MATERIALS; i++) {
     // Converting from xyY to XYZ:
     // X = Y / y * x
     // Y = Y
     // Z = Y / y * (1 - x - y);
-    float sunXYZ[3] = {
-        sunLux / currentSunXY[1] * currentSunXY[0],
-        sunLux,
-        sunLux / currentSunXY[1] *
-        (1 - currentSunXY[0] - currentSunXY[1])
-    };
-    float sunShadeXYZ[3] = {
-        sunShadeLux / currentShadeXY[1] * currentShadeXY[0],
-        sunShadeLux,
-        sunShadeLux / currentShadeXY[1] *
-        (1 - currentShadeXY[0] - currentShadeXY[1])
-    };
-    ALOGV("Sun XYZ: %f, %f, %f",
-            sunXYZ[0], sunXYZ[1], sunXYZ[2]);
-    ALOGV("Sun shade XYZ: %f, %f, %f",
-            sunShadeXYZ[0], sunShadeXYZ[1], sunShadeXYZ[2]);
+    float matXYZ[3] = {
+        kMaterials_xyY[i][2] / kMaterials_xyY[i][1] * kMaterials_xyY[i][0],
+        kMaterials_xyY[i][2],
+        kMaterials_xyY[i][2] / kMaterials_xyY[i][1] *
+            (1 - kMaterials_xyY[i][0] - kMaterials_xyY[i][1])};
 
-    // Determine moonlight levels
-    float moonLux =
-            kMoonlight[timeIdx] * (1 - timeFrac) +
-            kMoonlight[nextTimeIdx] * timeFrac;
-    float moonShadeLux = moonLux * (kDaylightShadeIllum / kDirectSunIllum);
+    if (kMaterialsFlags[i] == 0 || kMaterialsFlags[i] & kSky) {
+      matXYZ[0] *= directIllumXYZ[0];
+      matXYZ[1] *= directIllumXYZ[1];
+      matXYZ[2] *= directIllumXYZ[2];
+    } else if (kMaterialsFlags[i] & kShadowed) {
+      matXYZ[0] *= shadeIllumXYZ[0];
+      matXYZ[1] *= shadeIllumXYZ[1];
+      matXYZ[2] *= shadeIllumXYZ[2];
+    }  // else if (kMaterialsFlags[i] & kSelfLit), do nothing
 
-    float moonXYZ[3] = {
-        moonLux / kMoonlightXY[1] * kMoonlightXY[0],
-        moonLux,
-        moonLux / kMoonlightXY[1] *
-        (1 - kMoonlightXY[0] - kMoonlightXY[1])
-    };
-    float moonShadeXYZ[3] = {
-        moonShadeLux / kMoonlightXY[1] * kMoonlightXY[0],
-        moonShadeLux,
-        moonShadeLux / kMoonlightXY[1] *
-        (1 - kMoonlightXY[0] - kMoonlightXY[1])
-    };
+    ALOGV("Mat %d XYZ: %f, %f, %f", i, matXYZ[0], matXYZ[1], matXYZ[2]);
+    float luxToElectrons =
+        mSensorSensitivity * mExposureDuration / (kAperture * kAperture);
+    mCurrentColors[i * NUM_CHANNELS + 0] =
+        (mFilterR[0] * matXYZ[0] + mFilterR[1] * matXYZ[1] +
+         mFilterR[2] * matXYZ[2]) *
+        luxToElectrons;
+    mCurrentColors[i * NUM_CHANNELS + 1] =
+        (mFilterGr[0] * matXYZ[0] + mFilterGr[1] * matXYZ[1] +
+         mFilterGr[2] * matXYZ[2]) *
+        luxToElectrons;
+    mCurrentColors[i * NUM_CHANNELS + 2] =
+        (mFilterGb[0] * matXYZ[0] + mFilterGb[1] * matXYZ[1] +
+         mFilterGb[2] * matXYZ[2]) *
+        luxToElectrons;
+    mCurrentColors[i * NUM_CHANNELS + 3] =
+        (mFilterB[0] * matXYZ[0] + mFilterB[1] * matXYZ[1] +
+         mFilterB[2] * matXYZ[2]) *
+        luxToElectrons;
 
-    // Determine starlight level
-    const float kClearNightXYZ[3] = {
-        kClearNightIllum / kMoonlightXY[1] * kMoonlightXY[0],
-        kClearNightIllum,
-        kClearNightIllum / kMoonlightXY[1] *
-            (1 - kMoonlightXY[0] - kMoonlightXY[1])
-    };
+    ALOGV("Color %d RGGB: %d, %d, %d, %d", i,
+          mCurrentColors[i * NUM_CHANNELS + 0],
+          mCurrentColors[i * NUM_CHANNELS + 1],
+          mCurrentColors[i * NUM_CHANNELS + 2],
+          mCurrentColors[i * NUM_CHANNELS + 3]);
+  }
+  // Shake viewpoint; horizontal and vertical sinusoids at roughly
+  // human handshake frequencies
+  mHandshakeX = (kFreq1Magnitude * std::sin(kHorizShakeFreq1 * timeSinceIdx) +
+                 kFreq2Magnitude * std::sin(kHorizShakeFreq2 * timeSinceIdx)) *
+                mMapDiv * kShakeFraction;
 
-    // Calculate direct and shaded light
-    float directIllumXYZ[3] = {
-        sunXYZ[0] + moonXYZ[0] + kClearNightXYZ[0],
-        sunXYZ[1] + moonXYZ[1] + kClearNightXYZ[1],
-        sunXYZ[2] + moonXYZ[2] + kClearNightXYZ[2],
-    };
+  mHandshakeY = (kFreq1Magnitude * std::sin(kVertShakeFreq1 * timeSinceIdx) +
+                 kFreq2Magnitude * std::sin(kVertShakeFreq2 * timeSinceIdx)) *
+                mMapDiv * kShakeFraction;
 
-    float shadeIllumXYZ[3] = {
-        kClearNightXYZ[0],
-        kClearNightXYZ[1],
-        kClearNightXYZ[2]
-    };
-
-    shadeIllumXYZ[0] += (mHour < kSunOverhead) ? sunXYZ[0] : sunShadeXYZ[0];
-    shadeIllumXYZ[1] += (mHour < kSunOverhead) ? sunXYZ[1] : sunShadeXYZ[1];
-    shadeIllumXYZ[2] += (mHour < kSunOverhead) ? sunXYZ[2] : sunShadeXYZ[2];
-
-    // Moon up period covers 23->0 transition, shift for simplicity
-    int adjHour = (mHour + 12) % 24;
-    int adjMoonOverhead = (kMoonOverhead + 12 ) % 24;
-    shadeIllumXYZ[0] += (adjHour < adjMoonOverhead) ?
-            moonXYZ[0] : moonShadeXYZ[0];
-    shadeIllumXYZ[1] += (adjHour < adjMoonOverhead) ?
-            moonXYZ[1] : moonShadeXYZ[1];
-    shadeIllumXYZ[2] += (adjHour < adjMoonOverhead) ?
-            moonXYZ[2] : moonShadeXYZ[2];
-
-    ALOGV("Direct XYZ: %f, %f, %f",
-            directIllumXYZ[0],directIllumXYZ[1],directIllumXYZ[2]);
-    ALOGV("Shade XYZ: %f, %f, %f",
-            shadeIllumXYZ[0], shadeIllumXYZ[1], shadeIllumXYZ[2]);
-
-    for (int i = 0; i < NUM_MATERIALS; i++) {
-        // Converting for xyY to XYZ:
-        // X = Y / y * x
-        // Y = Y
-        // Z = Y / y * (1 - x - y);
-        float matXYZ[3] = {
-            kMaterials_xyY[i][2] / kMaterials_xyY[i][1] *
-              kMaterials_xyY[i][0],
-            kMaterials_xyY[i][2],
-            kMaterials_xyY[i][2] / kMaterials_xyY[i][1] *
-              (1 - kMaterials_xyY[i][0] - kMaterials_xyY[i][1])
-        };
-
-        if (kMaterialsFlags[i] == 0 || kMaterialsFlags[i] & kSky) {
-            matXYZ[0] *= directIllumXYZ[0];
-            matXYZ[1] *= directIllumXYZ[1];
-            matXYZ[2] *= directIllumXYZ[2];
-        } else if (kMaterialsFlags[i] & kShadowed) {
-            matXYZ[0] *= shadeIllumXYZ[0];
-            matXYZ[1] *= shadeIllumXYZ[1];
-            matXYZ[2] *= shadeIllumXYZ[2];
-        } // else if (kMaterialsFlags[i] * kSelfLit), do nothing
-
-        ALOGV("Mat %d XYZ: %f, %f, %f", i, matXYZ[0], matXYZ[1], matXYZ[2]);
-        float luxToElectrons = mSensorSensitivity * mExposureDuration /
-                (kAperture * kAperture);
-        mCurrentColors[i*NUM_CHANNELS + 0] =
-                (mFilterR[0] * matXYZ[0] +
-                 mFilterR[1] * matXYZ[1] +
-                 mFilterR[2] * matXYZ[2])
-                * luxToElectrons;
-        mCurrentColors[i*NUM_CHANNELS + 1] =
-                (mFilterGr[0] * matXYZ[0] +
-                 mFilterGr[1] * matXYZ[1] +
-                 mFilterGr[2] * matXYZ[2])
-                * luxToElectrons;
-        mCurrentColors[i*NUM_CHANNELS + 2] =
-                (mFilterGb[0] * matXYZ[0] +
-                 mFilterGb[1] * matXYZ[1] +
-                 mFilterGb[2] * matXYZ[2])
-                * luxToElectrons;
-        mCurrentColors[i*NUM_CHANNELS + 3] =
-                (mFilterB[0] * matXYZ[0] +
-                 mFilterB[1] * matXYZ[1] +
-                 mFilterB[2] * matXYZ[2])
-                * luxToElectrons;
-
-        ALOGV("Color %d RGGB: %d, %d, %d, %d", i,
-                mCurrentColors[i*NUM_CHANNELS + 0],
-                mCurrentColors[i*NUM_CHANNELS + 1],
-                mCurrentColors[i*NUM_CHANNELS + 2],
-                mCurrentColors[i*NUM_CHANNELS + 3]);
-    }
-    // Shake viewpoint; horizontal and vertical sinusoids at roughly
-    // human handshake frequencies
-    mHandshakeX =
-            ( kFreq1Magnitude * std::sin(kHorizShakeFreq1 * timeSinceIdx) +
-              kFreq2Magnitude * std::sin(kHorizShakeFreq2 * timeSinceIdx) ) *
-            mMapDiv * kShakeFraction;
-
-    mHandshakeY =
-            ( kFreq1Magnitude * std::sin(kVertShakeFreq1 * timeSinceIdx) +
-              kFreq2Magnitude * std::sin(kVertShakeFreq2 * timeSinceIdx) ) *
-            mMapDiv * kShakeFraction;
-
-    // Set starting pixel
-    setReadoutPixel(0,0);
+  // Set starting pixel
+  setReadoutPixel(0, 0);
 }
 
 void Scene::setReadoutPixel(int x, int y) {
-    mCurrentX = x;
-    mCurrentY = y;
-    mSubX = (x + mOffsetX + mHandshakeX) % mMapDiv;
-    mSubY = (y + mOffsetY + mHandshakeY) % mMapDiv;
-    mSceneX = (x + mOffsetX + mHandshakeX) / mMapDiv;
-    mSceneY = (y + mOffsetY + mHandshakeY) / mMapDiv;
-    mSceneIdx = mSceneY * kSceneWidth + mSceneX;
-    mCurrentSceneMaterial = &(mCurrentColors[kScene[mSceneIdx]]);
+  mCurrentX = x;
+  mCurrentY = y;
+  mSubX = (x + mOffsetX + mHandshakeX) % mMapDiv;
+  mSubY = (y + mOffsetY + mHandshakeY) % mMapDiv;
+  mSceneX = (x + mOffsetX + mHandshakeX) / mMapDiv;
+  mSceneY = (y + mOffsetY + mHandshakeY) / mMapDiv;
+  mSceneIdx = mSceneY * kSceneWidth + mSceneX;
+  mCurrentSceneMaterial = &(mCurrentColors[kScene[mSceneIdx]]);
 }
 
-const uint32_t* Scene::getPixelElectrons() {
-    const uint32_t *pixel = mCurrentSceneMaterial;
-    mCurrentX++;
-    mSubX++;
-    if (mCurrentX >= mSensorWidth) {
-        mCurrentX = 0;
-        mCurrentY++;
-        if (mCurrentY >= mSensorHeight) mCurrentY = 0;
-        setReadoutPixel(mCurrentX, mCurrentY);
-    } else if (mSubX > mMapDiv) {
-        mSceneIdx++;
-        mSceneX++;
-        mCurrentSceneMaterial = &(mCurrentColors[kScene[mSceneIdx]]);
-        mSubX = 0;
-    }
-    return pixel;
+const uint32_t *Scene::getPixelElectrons() {
+  const uint32_t *pixel = mCurrentSceneMaterial;
+  mCurrentX++;
+  mSubX++;
+  if (mCurrentX >= mSensorWidth) {
+    mCurrentX = 0;
+    mCurrentY++;
+    if (mCurrentY >= mSensorHeight) mCurrentY = 0;
+    setReadoutPixel(mCurrentX, mCurrentY);
+  } else if (mSubX > mMapDiv) {
+    mSceneIdx++;
+    mSceneX++;
+    mCurrentSceneMaterial = &(mCurrentColors[kScene[mSceneIdx]]);
+    mSubX = 0;
+  }
+  return pixel;
 }
 
 // Handshake model constants.
 // Frequencies measured in a nanosecond timebase
-const float Scene::kHorizShakeFreq1 = 2 * M_PI * 2  / 1e9; // 2 Hz
-const float Scene::kHorizShakeFreq2 = 2 * M_PI * 13 / 1e9; // 13 Hz
-const float Scene::kVertShakeFreq1  = 2 * M_PI * 3  / 1e9; // 3 Hz
-const float Scene::kVertShakeFreq2  = 2 * M_PI * 11 / 1e9; // 1 Hz
-const float Scene::kFreq1Magnitude  = 5;
-const float Scene::kFreq2Magnitude  = 1;
-const float Scene::kShakeFraction   = 0.03; // As a fraction of a scene tile
+const float Scene::kHorizShakeFreq1 = 2 * M_PI * 2 / 1e9;   // 2 Hz
+const float Scene::kHorizShakeFreq2 = 2 * M_PI * 13 / 1e9;  // 13 Hz
+const float Scene::kVertShakeFreq1 = 2 * M_PI * 3 / 1e9;    // 3 Hz
+const float Scene::kVertShakeFreq2 = 2 * M_PI * 11 / 1e9;  // 11 Hz
+const float Scene::kFreq1Magnitude = 5;
+const float Scene::kFreq2Magnitude = 1;
+const float Scene::kShakeFraction = 0.03;  // As a fraction of a scene tile
 
 // RGB->YUV, Jpeg standard
 const float Scene::kRgb2Yuv[12] = {
-       0.299f,    0.587f,    0.114f,    0.f,
-    -0.16874f, -0.33126f,      0.5f, -128.f,
-         0.5f, -0.41869f, -0.08131f, -128.f,
+    0.299f, 0.587f, 0.114f, 0.f,
+    -0.16874f, -0.33126f, 0.5f, -128.f,
+    0.5f, -0.41869f, -0.08131f, -128.f,
 };
 
 // Aperture of imaging lens
 const float Scene::kAperture = 2.8;
 
 // Sun illumination levels through the day
-const float Scene::kSunlight[24/kTimeStep] =
-{
-    0, // 00:00
-    0,
-    0,
-    kTwilightIllum, // 06:00
-    kDirectSunIllum,
-    kDirectSunIllum,
-    kDirectSunIllum, // 12:00
-    kDirectSunIllum,
-    kDirectSunIllum,
-    kSunsetIllum, // 18:00
-    kTwilightIllum,
-    0
-};
+const float Scene::kSunlight[24 / kTimeStep] = {0,  // 00:00
+                                                0,
+                                                0,
+                                                kTwilightIllum,  // 06:00
+                                                kDirectSunIllum,
+                                                kDirectSunIllum,
+                                                kDirectSunIllum,  // 12:00
+                                                kDirectSunIllum,
+                                                kDirectSunIllum,
+                                                kSunsetIllum,  // 18:00
+                                                kTwilightIllum,
+                                                0};
 
 // Moon illumination levels through the day
-const float Scene::kMoonlight[24/kTimeStep] =
-{
-    kFullMoonIllum, // 00:00
-    kFullMoonIllum,
-    0,
-    0, // 06:00
-    0,
-    0,
-    0, // 12:00
-    0,
-    0,
-    0, // 18:00
-    0,
-    kFullMoonIllum
-};
+const float Scene::kMoonlight[24 / kTimeStep] = {kFullMoonIllum,  // 00:00
+                                                 kFullMoonIllum,
+                                                 0,
+                                                 0,  // 06:00
+                                                 0,
+                                                 0,
+                                                 0,  // 12:00
+                                                 0,
+                                                 0,
+                                                 0,  // 18:00
+                                                 0,
+                                                 kFullMoonIllum};
 
 const int Scene::kSunOverhead = 12;
 const int Scene::kMoonOverhead = 0;
 
 // Used for sun illumination levels
-const float Scene::kDirectSunIllum     = 100000;
-const float Scene::kSunsetIllum        = 400;
-const float Scene::kTwilightIllum      = 4;
+const float Scene::kDirectSunIllum = 100000;
+const float Scene::kSunsetIllum = 400;
+const float Scene::kTwilightIllum = 4;
 // Used for moon illumination levels
-const float Scene::kFullMoonIllum      = 1;
+const float Scene::kFullMoonIllum = 1;
 // Other illumination levels
 const float Scene::kDaylightShadeIllum = 20000;
-const float Scene::kClearNightIllum    = 2e-3;
-const float Scene::kStarIllum          = 2e-6;
-const float Scene::kLivingRoomIllum    = 50;
+const float Scene::kClearNightIllum = 2e-3;
+const float Scene::kStarIllum = 2e-6;
+const float Scene::kLivingRoomIllum = 50;
 
-const float Scene::kIncandescentXY[2]   = { 0.44757f, 0.40745f};
-const float Scene::kDirectSunlightXY[2] = { 0.34842f, 0.35161f};
-const float Scene::kDaylightXY[2]       = { 0.31271f, 0.32902f};
-const float Scene::kNoonSkyXY[2]        = { 0.346f,   0.359f};
-const float Scene::kMoonlightXY[2]      = { 0.34842f, 0.35161f};
-const float Scene::kSunsetXY[2]         = { 0.527f,   0.413f};
+const float Scene::kIncandescentXY[2] = {0.44757f, 0.40745f};
+const float Scene::kDirectSunlightXY[2] = {0.34842f, 0.35161f};
+const float Scene::kDaylightXY[2] = {0.31271f, 0.32902f};
+const float Scene::kNoonSkyXY[2] = {0.346f, 0.359f};
+const float Scene::kMoonlightXY[2] = {0.34842f, 0.35161f};
+const float Scene::kSunsetXY[2] = {0.527f, 0.413f};
 
-const uint8_t Scene::kSelfLit  = 0x01;
+const uint8_t Scene::kSelfLit = 0x01;
 const uint8_t Scene::kShadowed = 0x02;
-const uint8_t Scene::kSky      = 0x04;
+const uint8_t Scene::kSky = 0x04;
 
 // For non-self-lit materials, the Y component is normalized with 1=full
 // reflectance; for self-lit materials, it's the constant illuminance in lux.
 const float Scene::kMaterials_xyY[Scene::NUM_MATERIALS][3] = {
-    { 0.3688f, 0.4501f, .1329f }, // GRASS
-    { 0.3688f, 0.4501f, .1329f }, // GRASS_SHADOW
-    { 0.3986f, 0.5002f, .4440f }, // HILL
-    { 0.3262f, 0.5040f, .2297f }, // WALL
-    { 0.4336f, 0.3787f, .1029f }, // ROOF
-    { 0.3316f, 0.2544f, .0639f }, // DOOR
-    { 0.3425f, 0.3577f, .0887f }, // CHIMNEY
-    { kIncandescentXY[0], kIncandescentXY[1], kLivingRoomIllum }, // WINDOW
-    { kDirectSunlightXY[0], kDirectSunlightXY[1], kDirectSunIllum }, // SUN
-    { kNoonSkyXY[0], kNoonSkyXY[1], kDaylightShadeIllum / kDirectSunIllum }, // SKY
-    { kMoonlightXY[0], kMoonlightXY[1], kFullMoonIllum } // MOON
+    {0.3688f, 0.4501f, .1329f},                                  // GRASS
+    {0.3688f, 0.4501f, .1329f},                                  // GRASS_SHADOW
+    {0.3986f, 0.5002f, .4440f},                                  // HILL
+    {0.3262f, 0.5040f, .2297f},                                  // WALL
+    {0.4336f, 0.3787f, .1029f},                                  // ROOF
+    {0.3316f, 0.2544f, .0639f},                                  // DOOR
+    {0.3425f, 0.3577f, .0887f},                                  // CHIMNEY
+    {kIncandescentXY[0], kIncandescentXY[1], kLivingRoomIllum},  // WINDOW
+    {kDirectSunlightXY[0], kDirectSunlightXY[1], kDirectSunIllum},  // SUN
+    {kNoonSkyXY[0], kNoonSkyXY[1],
+     kDaylightShadeIllum / kDirectSunIllum},            // SKY
+    {kMoonlightXY[0], kMoonlightXY[1], kFullMoonIllum}  // MOON
 };
 
 const uint8_t Scene::kMaterialsFlags[Scene::NUM_MATERIALS] = {
-    0,
-    kShadowed,
-    kShadowed,
-    kShadowed,
-    kShadowed,
-    kShadowed,
-    kShadowed,
-    kSelfLit,
-    kSelfLit,
-    kSky,
-    kSelfLit,
+    0,         kShadowed, kShadowed, kShadowed, kShadowed, kShadowed,
+    kShadowed, kSelfLit,  kSelfLit,  kSky,      kSelfLit,
 };
 
-} // namespace android
+}  // namespace android
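
The xyY-to-XYZ conversion that calculateScene writes out inline four times can be read as one small helper; a sketch (xyYToXYZ is an illustrative name, not part of this file):

    // Convert CIE xyY (chromaticity x, y plus luminance Y) to XYZ:
    //   X = Y / y * x
    //   Z = Y / y * (1 - x - y)
    static void xyYToXYZ(float x, float y, float Y, float outXYZ[3]) {
      outXYZ[0] = Y / y * x;
      outXYZ[1] = Y;
      outXYZ[2] = Y / y * (1 - x - y);
    }

    // The sunlight term above would then read:
    //   xyYToXYZ(currentSunXY[0], currentSunXY[1], sunLux, sunXYZ);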
diff --git a/guest/hals/camera/fake-pipeline2/Scene.h b/guest/hals/camera/fake-pipeline2/Scene.h
index 66d1a69..5e86861 100644
--- a/guest/hals/camera/fake-pipeline2/Scene.h
+++ b/guest/hals/camera/fake-pipeline2/Scene.h
@@ -31,161 +31,148 @@
 namespace android {
 
 class Scene {
-  public:
-    Scene(int sensorWidthPx,
-            int sensorHeightPx,
-            float sensorSensitivity);
-    ~Scene();
+ public:
+  Scene(int sensorWidthPx, int sensorHeightPx, float sensorSensitivity);
+  ~Scene();
 
-    // Set the filter coefficients for the red, green, and blue filters on the
-    // sensor. Used as an optimization to pre-calculate various illuminance
-    // values. Two different green filters can be provided, to account for
-    // possible cross-talk on a Bayer sensor. Must be called before
-    // calculateScene.
-    void setColorFilterXYZ(
-        float rX, float rY, float rZ,
-        float grX, float grY, float grZ,
-        float gbX, float gbY, float gbZ,
-        float bX, float bY, float bZ);
+  // Set the filter coefficients for the red, green, and blue filters on the
+  // sensor. Used as an optimization to pre-calculate various illuminance
+  // values. Two different green filters can be provided, to account for
+  // possible cross-talk on a Bayer sensor. Must be called before
+  // calculateScene.
+  void setColorFilterXYZ(float rX, float rY, float rZ, float grX, float grY,
+                         float grZ, float gbX, float gbY, float gbZ, float bX,
+                         float bY, float bZ);
 
-    // Set time of day (24-hour clock). This controls the general light levels
-    // in the scene. Must be called before calculateScene
-    void setHour(int hour);
-    // Get current hour
-    int getHour();
+  // Set time of day (24-hour clock). This controls the general light levels
+  // in the scene. Must be called before calculateScene
+  void setHour(int hour);
+  // Get current hour
+  int getHour();
 
-    // Set the duration of exposure for determining luminous exposure.
-    // Must be called before calculateScene
-    void setExposureDuration(float seconds);
+  // Set the duration of exposure for determining luminous exposure.
+  // Must be called before calculateScene
+  void setExposureDuration(float seconds);
 
-    // Calculate scene information for current hour and the time offset since
-    // the hour. Must be called at least once before calling getLuminousExposure.
-    // Resets pixel readout location to 0,0
-    void calculateScene(nsecs_t time);
+  // Calculate scene information for current hour and the time offset since
+  // the hour. Must be called at least once before calling getLuminousExposure.
+  // Resets pixel readout location to 0,0
+  void calculateScene(nsecs_t time);
 
-    // Set sensor pixel readout location.
-    void setReadoutPixel(int x, int y);
+  // Set sensor pixel readout location.
+  void setReadoutPixel(int x, int y);
 
-    // Get sensor response in physical units (electrons) for light hitting the
-    // current readout pixel, after passing through color filters. The readout
-    // pixel will be auto-incremented. The returned array can be indexed with
-    // ColorChannels.
-    const uint32_t* getPixelElectrons();
+  // Get sensor response in physical units (electrons) for light hitting the
+  // current readout pixel, after passing through color filters. The readout
+  // pixel will be auto-incremented. The returned array can be indexed with
+  // ColorChannels.
+  const uint32_t* getPixelElectrons();
 
-    enum ColorChannels {
-        R = 0,
-        Gr,
-        Gb,
-        B,
-        Y,
-        Cb,
-        Cr,
-        NUM_CHANNELS
-    };
+  enum ColorChannels { R = 0, Gr, Gb, B, Y, Cb, Cr, NUM_CHANNELS };
 
-  private:
-    // Sensor color filtering coefficients in XYZ
-    float mFilterR[3];
-    float mFilterGr[3];
-    float mFilterGb[3];
-    float mFilterB[3];
+ private:
+  // Sensor color filtering coefficients in XYZ
+  float mFilterR[3];
+  float mFilterGr[3];
+  float mFilterGb[3];
+  float mFilterB[3];
 
-    int mOffsetX, mOffsetY;
-    int mMapDiv;
+  int mOffsetX, mOffsetY;
+  int mMapDiv;
 
-    int mHandshakeX, mHandshakeY;
+  int mHandshakeX, mHandshakeY;
 
-    int mSensorWidth;
-    int mSensorHeight;
-    int mCurrentX;
-    int mCurrentY;
-    int mSubX;
-    int mSubY;
-    int mSceneX;
-    int mSceneY;
-    int mSceneIdx;
-    uint32_t *mCurrentSceneMaterial;
+  int mSensorWidth;
+  int mSensorHeight;
+  int mCurrentX;
+  int mCurrentY;
+  int mSubX;
+  int mSubY;
+  int mSceneX;
+  int mSceneY;
+  int mSceneIdx;
+  uint32_t* mCurrentSceneMaterial;
 
-    int mHour;
-    float mExposureDuration;
-    float mSensorSensitivity;
+  int mHour;
+  float mExposureDuration;
+  float mSensorSensitivity;
 
-    enum Materials {
-        GRASS = 0,
-        GRASS_SHADOW,
-        HILL,
-        WALL,
-        ROOF,
-        DOOR,
-        CHIMNEY,
-        WINDOW,
-        SUN,
-        SKY,
-        MOON,
-        NUM_MATERIALS
-    };
+  enum Materials {
+    GRASS = 0,
+    GRASS_SHADOW,
+    HILL,
+    WALL,
+    ROOF,
+    DOOR,
+    CHIMNEY,
+    WINDOW,
+    SUN,
+    SKY,
+    MOON,
+    NUM_MATERIALS
+  };
 
-    uint32_t mCurrentColors[NUM_MATERIALS*NUM_CHANNELS];
+  uint32_t mCurrentColors[NUM_MATERIALS * NUM_CHANNELS];
 
-    /**
-     * Constants for scene definition. These are various degrees of approximate.
-     */
+  /**
+   * Constants for scene definition. These are approximate to varying degrees.
+   */
 
-    // Fake handshake parameters. Two shake frequencies per axis, plus magnitude
-    // as a fraction of a scene tile, and relative magnitudes for the frequencies
-    static const float kHorizShakeFreq1;
-    static const float kHorizShakeFreq2;
-    static const float kVertShakeFreq1;
-    static const float kVertShakeFreq2;
-    static const float kFreq1Magnitude;
-    static const float kFreq2Magnitude;
+  // Fake handshake parameters. Two shake frequencies per axis, plus magnitude
+  // as a fraction of a scene tile, and relative magnitudes for the frequencies
+  static const float kHorizShakeFreq1;
+  static const float kHorizShakeFreq2;
+  static const float kVertShakeFreq1;
+  static const float kVertShakeFreq2;
+  static const float kFreq1Magnitude;
+  static const float kFreq2Magnitude;
 
-    static const float kShakeFraction;
+  static const float kShakeFraction;
 
-    // RGB->YUV conversion
-    static const float kRgb2Yuv[12];
+  // RGB->YUV conversion
+  static const float kRgb2Yuv[12];
 
-    // Aperture of imaging lens
-    static const float kAperture;
+  // Aperture of imaging lens
+  static const float kAperture;
 
-    // Sun, moon illuminance levels in 2-hour increments. These don't match any
-    // real day anywhere.
-    static const uint32_t kTimeStep = 2;
-    static const float kSunlight[];
-    static const float kMoonlight[];
-    static const int kSunOverhead;
-    static const int kMoonOverhead;
+  // Sun, moon illuminance levels in 2-hour increments. These don't match any
+  // real day anywhere.
+  static const uint32_t kTimeStep = 2;
+  static const float kSunlight[];
+  static const float kMoonlight[];
+  static const int kSunOverhead;
+  static const int kMoonOverhead;
 
-    // Illumination levels for various conditions, in lux
-    static const float kDirectSunIllum;
-    static const float kDaylightShadeIllum;
-    static const float kSunsetIllum;
-    static const float kTwilightIllum;
-    static const float kFullMoonIllum;
-    static const float kClearNightIllum;
-    static const float kStarIllum;
-    static const float kLivingRoomIllum;
+  // Illumination levels for various conditions, in lux
+  static const float kDirectSunIllum;
+  static const float kDaylightShadeIllum;
+  static const float kSunsetIllum;
+  static const float kTwilightIllum;
+  static const float kFullMoonIllum;
+  static const float kClearNightIllum;
+  static const float kStarIllum;
+  static const float kLivingRoomIllum;
 
-    // Chromaticity of various illumination sources
-    static const float kIncandescentXY[2];
-    static const float kDirectSunlightXY[2];
-    static const float kDaylightXY[2];
-    static const float kNoonSkyXY[2];
-    static const float kMoonlightXY[2];
-    static const float kSunsetXY[2];
+  // Chromaticity of various illumination sources
+  static const float kIncandescentXY[2];
+  static const float kDirectSunlightXY[2];
+  static const float kDaylightXY[2];
+  static const float kNoonSkyXY[2];
+  static const float kMoonlightXY[2];
+  static const float kSunsetXY[2];
 
-    static const uint8_t kSelfLit;
-    static const uint8_t kShadowed;
-    static const uint8_t kSky;
+  static const uint8_t kSelfLit;
+  static const uint8_t kShadowed;
+  static const uint8_t kSky;
 
-    static const float kMaterials_xyY[NUM_MATERIALS][3];
-    static const uint8_t kMaterialsFlags[NUM_MATERIALS];
+  static const float kMaterials_xyY[NUM_MATERIALS][3];
+  static const uint8_t kMaterialsFlags[NUM_MATERIALS];
 
-    static const int kSceneWidth;
-    static const int kSceneHeight;
-    static const uint8_t kScene[];
+  static const int kSceneWidth;
+  static const int kSceneHeight;
+  static const uint8_t kScene[];
 };
 
-}
+}  // namespace android
 
-#endif // HW_EMULATOR_CAMERA2_SCENE_H
+#endif  // HW_EMULATOR_CAMERA2_SCENE_H
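
Taken together, the comments above imply a readout loop along these lines (a sketch with illustrative dimensions and sensitivity; systemTime() comes from utils/Timers.h):

    Scene scene(640, 480, /*sensorSensitivity=*/100.0f);
    scene.setHour(12);                   // midday light levels
    scene.setExposureDuration(0.033f);   // ~1/30 s
    scene.calculateScene(systemTime());  // resets readout to (0, 0)

    uint64_t total = 0;
    for (int y = 0; y < 480; y++) {
      for (int x = 0; x < 640; x++) {
        // getPixelElectrons() auto-increments the readout pixel; index the
        // returned array with Scene::ColorChannels.
        const uint32_t *e = scene.getPixelElectrons();
        total += e[Scene::R] + e[Scene::Gr] + e[Scene::Gb] + e[Scene::B];
      }
    }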
diff --git a/guest/hals/camera/fake-pipeline2/Sensor.cpp b/guest/hals/camera/fake-pipeline2/Sensor.cpp
index 108e336..78dc8fd 100644
--- a/guest/hals/camera/fake-pipeline2/Sensor.cpp
+++ b/guest/hals/camera/fake-pipeline2/Sensor.cpp
@@ -26,23 +26,23 @@
 
 #include <utils/Log.h>
 
-#include "../EmulatedFakeCamera2.h"
-#include "Sensor.h"
 #include <cmath>
 #include <cstdlib>
+#include "../EmulatedFakeCamera2.h"
+#include "Sensor.h"
 #include "guest/libs/platform_support/api_level_fixes.h"
 #include "system/camera_metadata.h"
 
 namespace android {
 
-//const nsecs_t Sensor::kExposureTimeRange[2] =
+// const nsecs_t Sensor::kExposureTimeRange[2] =
 //    {1000L, 30000000000L} ; // 1 us - 30 sec
-//const nsecs_t Sensor::kFrameDurationRange[2] =
+// const nsecs_t Sensor::kFrameDurationRange[2] =
 //    {33331760L, 30000000000L}; // ~1/30 s - 30 sec
-const nsecs_t Sensor::kExposureTimeRange[2] =
-    {1000L, 300000000L} ; // 1 us - 0.3 sec
-const nsecs_t Sensor::kFrameDurationRange[2] =
-    {33331760L, 300000000L}; // ~1/30 s - 0.3 sec
+const nsecs_t Sensor::kExposureTimeRange[2] = {1000L,
+                                               300000000L};  // 1 us - 0.3 sec
+const nsecs_t Sensor::kFrameDurationRange[2] = {
+    33331760L, 300000000L};  // ~1/30 s - 0.3 sec
 
 const nsecs_t Sensor::kMinVerticalBlank = 10000L;
 
@@ -51,28 +51,26 @@
 
 // Output image data characteristics
 const uint32_t Sensor::kMaxRawValue = 4000;
-const uint32_t Sensor::kBlackLevel  = 1000;
+const uint32_t Sensor::kBlackLevel = 1000;
 
 // Sensor sensitivity
-const float Sensor::kSaturationVoltage      = 0.520f;
+const float Sensor::kSaturationVoltage = 0.520f;
 const uint32_t Sensor::kSaturationElectrons = 2000;
-const float Sensor::kVoltsPerLuxSecond      = 0.100f;
+const float Sensor::kVoltsPerLuxSecond = 0.100f;
 
-const float Sensor::kElectronsPerLuxSecond =
-        Sensor::kSaturationElectrons / Sensor::kSaturationVoltage
-        * Sensor::kVoltsPerLuxSecond;
+const float Sensor::kElectronsPerLuxSecond = Sensor::kSaturationElectrons /
+                                             Sensor::kSaturationVoltage *
+                                             Sensor::kVoltsPerLuxSecond;
 
-const float Sensor::kBaseGainFactor = (float)Sensor::kMaxRawValue /
-            Sensor::kSaturationElectrons;
+const float Sensor::kBaseGainFactor =
+    (float)Sensor::kMaxRawValue / Sensor::kSaturationElectrons;
 
-const float Sensor::kReadNoiseStddevBeforeGain = 1.177; // in electrons
-const float Sensor::kReadNoiseStddevAfterGain =  2.100; // in digital counts
+const float Sensor::kReadNoiseStddevBeforeGain = 1.177;  // in electrons
+const float Sensor::kReadNoiseStddevAfterGain = 2.100;   // in digital counts
 const float Sensor::kReadNoiseVarBeforeGain =
-            Sensor::kReadNoiseStddevBeforeGain *
-            Sensor::kReadNoiseStddevBeforeGain;
+    Sensor::kReadNoiseStddevBeforeGain * Sensor::kReadNoiseStddevBeforeGain;
 const float Sensor::kReadNoiseVarAfterGain =
-            Sensor::kReadNoiseStddevAfterGain *
-            Sensor::kReadNoiseStddevAfterGain;
+    Sensor::kReadNoiseStddevAfterGain * Sensor::kReadNoiseStddevAfterGain;
 
 const int32_t Sensor::kSensitivityRange[2] = {100, 1600};
 const uint32_t Sensor::kDefaultSensitivity = 100;
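
Plugging the constants above into the derived values: kElectronsPerLuxSecond = 2000 / 0.520 * 0.100 ≈ 384.6 electrons per lux-second, and kBaseGainFactor = 4000 / 2000 = 2.0 digital counts per electron, so a saturated pixel (2000 electrons) maps exactly to kMaxRawValue.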
@@ -82,534 +80,517 @@
 // Take advantage of IEEE floating-point format to calculate an approximate
 // square root. Accurate to within +-3.6%
 float sqrtf_approx(float r) {
-    // Modifier is based on IEEE floating-point representation; the
-    // manipulations boil down to finding approximate log2, dividing by two, and
-    // then inverting the log2. A bias is added to make the relative error
-    // symmetric about the real answer.
-    const int32_t modifier = 0x1FBB4000;
+  // Modifier is based on IEEE floating-point representation; the
+  // manipulations boil down to finding approximate log2, dividing by two, and
+  // then inverting the log2. A bias is added to make the relative error
+  // symmetric about the real answer.
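+  // For example, r = 4.0f has bit pattern 0x40800000, and
+  // (0x40800000 >> 1) + 0x1FBB4000 = 0x3FFB4000 ~= 1.963f, about 1.9% below
+  // the exact value 2.0 and inside the stated +-3.6% bound.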
+  const int32_t modifier = 0x1FBB4000;
 
-    int32_t r_i = *(int32_t*)(&r);
-    r_i = (r_i >> 1) + modifier;
+  int32_t r_i = *(int32_t *)(&r);
+  r_i = (r_i >> 1) + modifier;
 
-    return *(float*)(&r_i);
+  return *(float *)(&r_i);
 }
 
-
-
-Sensor::Sensor(uint32_t width, uint32_t height):
-        Thread(false),
-        mResolution{width, height},
-        mActiveArray{0, 0, width, height},
-        mRowReadoutTime(kFrameDurationRange[0] / height),
-        mGotVSync(false),
-        mExposureTime(kFrameDurationRange[0]-kMinVerticalBlank),
-        mFrameDuration(kFrameDurationRange[0]),
-        mGainFactor(kDefaultSensitivity),
-        mNextBuffers(NULL),
-        mFrameNumber(0),
-        mCapturedBuffers(NULL),
-        mListener(NULL),
-        mScene(width, height, kElectronsPerLuxSecond)
-{
-    ALOGV("Sensor created with pixel array %d x %d", width, height);
+Sensor::Sensor(uint32_t width, uint32_t height)
+    : Thread(false),
+      mResolution{width, height},
+      mActiveArray{0, 0, width, height},
+      mRowReadoutTime(kFrameDurationRange[0] / height),
+      mGotVSync(false),
+      mExposureTime(kFrameDurationRange[0] - kMinVerticalBlank),
+      mFrameDuration(kFrameDurationRange[0]),
+      mGainFactor(kDefaultSensitivity),
+      mNextBuffers(NULL),
+      mFrameNumber(0),
+      mCapturedBuffers(NULL),
+      mListener(NULL),
+      mScene(width, height, kElectronsPerLuxSecond) {
+  ALOGV("Sensor created with pixel array %d x %d", width, height);
 }
 
-Sensor::~Sensor() {
-    shutDown();
-}
+Sensor::~Sensor() { shutDown(); }
 
 status_t Sensor::startUp() {
-    ALOGV("%s: E", __FUNCTION__);
+  ALOGV("%s: E", __FUNCTION__);
 
-    int res;
-    mCapturedBuffers = NULL;
-    res = run("EmulatedFakeCamera2::Sensor",
-            ANDROID_PRIORITY_URGENT_DISPLAY);
+  int res;
+  mCapturedBuffers = NULL;
+  res = run("EmulatedFakeCamera2::Sensor", ANDROID_PRIORITY_URGENT_DISPLAY);
 
-    if (res != OK) {
-        ALOGE("Unable to start up sensor capture thread: %d", res);
-    }
-    return res;
+  if (res != OK) {
+    ALOGE("Unable to start up sensor capture thread: %d", res);
+  }
+  return res;
 }
 
 status_t Sensor::shutDown() {
-    ALOGV("%s: E", __FUNCTION__);
+  ALOGV("%s: E", __FUNCTION__);
 
-    int res;
-    res = requestExitAndWait();
-    if (res != OK) {
-        ALOGE("Unable to shut down sensor capture thread: %d", res);
-    }
-    return res;
+  int res;
+  res = requestExitAndWait();
+  if (res != OK) {
+    ALOGE("Unable to shut down sensor capture thread: %d", res);
+  }
+  return res;
 }
 
-Scene &Sensor::getScene() {
-    return mScene;
-}
+Scene &Sensor::getScene() { return mScene; }
 
 void Sensor::setExposureTime(uint64_t ns) {
-    Mutex::Autolock lock(mControlMutex);
-    ALOGVV("Exposure set to %f", ns/1000000.f);
-    mExposureTime = ns;
+  Mutex::Autolock lock(mControlMutex);
+  ALOGVV("Exposure set to %f", ns / 1000000.f);
+  mExposureTime = ns;
 }
 
 void Sensor::setFrameDuration(uint64_t ns) {
-    Mutex::Autolock lock(mControlMutex);
-    ALOGVV("Frame duration set to %f", ns/1000000.f);
-    mFrameDuration = ns;
+  Mutex::Autolock lock(mControlMutex);
+  ALOGVV("Frame duration set to %f", ns / 1000000.f);
+  mFrameDuration = ns;
 }
 
 void Sensor::setSensitivity(uint32_t gain) {
-    Mutex::Autolock lock(mControlMutex);
-    ALOGVV("Gain set to %d", gain);
-    mGainFactor = gain;
+  Mutex::Autolock lock(mControlMutex);
+  ALOGVV("Gain set to %d", gain);
+  mGainFactor = gain;
 }
 
 void Sensor::setDestinationBuffers(Buffers *buffers) {
-    Mutex::Autolock lock(mControlMutex);
-    mNextBuffers = buffers;
+  Mutex::Autolock lock(mControlMutex);
+  mNextBuffers = buffers;
 }
 
 void Sensor::setFrameNumber(uint32_t frameNumber) {
-    Mutex::Autolock lock(mControlMutex);
-    mFrameNumber = frameNumber;
+  Mutex::Autolock lock(mControlMutex);
+  mFrameNumber = frameNumber;
 }
 
 bool Sensor::waitForVSync(nsecs_t reltime) {
+  int res;
+  Mutex::Autolock lock(mControlMutex);
+
+  mGotVSync = false;
+  res = mVSync.waitRelative(mControlMutex, reltime);
+  if (res != OK && res != TIMED_OUT) {
+    ALOGE("%s: Error waiting for VSync signal: %d", __FUNCTION__, res);
+    return false;
+  }
+  return mGotVSync;
+}
+
+bool Sensor::waitForNewFrame(nsecs_t reltime, nsecs_t *captureTime) {
+  Mutex::Autolock lock(mReadoutMutex);
+  uint8_t *ret;
+  if (mCapturedBuffers == NULL) {
     int res;
-    Mutex::Autolock lock(mControlMutex);
-
-    mGotVSync = false;
-    res = mVSync.waitRelative(mControlMutex, reltime);
-    if (res != OK && res != TIMED_OUT) {
-        ALOGE("%s: Error waiting for VSync signal: %d", __FUNCTION__, res);
-        return false;
+    res = mReadoutAvailable.waitRelative(mReadoutMutex, reltime);
+    if (res == TIMED_OUT) {
+      return false;
+    } else if (res != OK || mCapturedBuffers == NULL) {
+      ALOGE("Error waiting for sensor readout signal: %d", res);
+      return false;
     }
-    return mGotVSync;
+  }
+  mReadoutComplete.signal();
+
+  *captureTime = mCaptureTime;
+  mCapturedBuffers = NULL;
+  return true;
 }
 
-bool Sensor::waitForNewFrame(nsecs_t reltime,
-        nsecs_t *captureTime) {
-    Mutex::Autolock lock(mReadoutMutex);
-    uint8_t *ret;
-    if (mCapturedBuffers == NULL) {
-        int res;
-        res = mReadoutAvailable.waitRelative(mReadoutMutex, reltime);
-        if (res == TIMED_OUT) {
-            return false;
-        } else if (res != OK || mCapturedBuffers == NULL) {
-            ALOGE("Error waiting for sensor readout signal: %d", res);
-            return false;
-        }
-    }
-    mReadoutComplete.signal();
-
-    *captureTime = mCaptureTime;
-    mCapturedBuffers = NULL;
-    return true;
-}
-
-Sensor::SensorListener::~SensorListener() {
-}
+Sensor::SensorListener::~SensorListener() {}
 
 void Sensor::setSensorListener(SensorListener *listener) {
-    Mutex::Autolock lock(mControlMutex);
-    mListener = listener;
+  Mutex::Autolock lock(mControlMutex);
+  mListener = listener;
 }
 
 status_t Sensor::readyToRun() {
-    ALOGV("Starting up sensor thread");
-    mStartupTime = systemTime();
-    mNextCaptureTime = 0;
-    mNextCapturedBuffers = NULL;
-    return OK;
+  ALOGV("Starting up sensor thread");
+  mStartupTime = systemTime();
+  mNextCaptureTime = 0;
+  mNextCapturedBuffers = NULL;
+  return OK;
 }
 
 bool Sensor::threadLoop() {
-    /**
-     * Sensor capture operation main loop.
-     *
-     * Stages are out-of-order relative to a single frame's processing, but
-     * in-order in time.
-     */
+  /**
+   * Sensor capture operation main loop.
+   *
+   * Stages are out-of-order relative to a single frame's processing, but
+   * in-order in time.
+   */
 
-    /**
-     * Stage 1: Read in latest control parameters
-     */
-    uint64_t exposureDuration;
-    uint64_t frameDuration;
-    uint32_t gain;
-    Buffers *nextBuffers;
-    uint32_t frameNumber;
-    SensorListener *listener = NULL;
-    {
-        Mutex::Autolock lock(mControlMutex);
-        exposureDuration = mExposureTime;
-        frameDuration    = mFrameDuration;
-        gain             = mGainFactor;
-        nextBuffers      = mNextBuffers;
-        frameNumber      = mFrameNumber;
-        listener         = mListener;
-        // Don't reuse a buffer set
-        mNextBuffers = NULL;
+  /**
+   * Stage 1: Read in latest control parameters
+   */
+  uint64_t exposureDuration;
+  uint64_t frameDuration;
+  uint32_t gain;
+  Buffers *nextBuffers;
+  uint32_t frameNumber;
+  SensorListener *listener = NULL;
+  {
+    Mutex::Autolock lock(mControlMutex);
+    exposureDuration = mExposureTime;
+    frameDuration = mFrameDuration;
+    gain = mGainFactor;
+    nextBuffers = mNextBuffers;
+    frameNumber = mFrameNumber;
+    listener = mListener;
+    // Don't reuse a buffer set
+    mNextBuffers = NULL;
 
-        // Signal VSync for start of readout
-        ALOGVV("Sensor VSync");
-        mGotVSync = true;
-        mVSync.signal();
+    // Signal VSync for start of readout
+    ALOGVV("Sensor VSync");
+    mGotVSync = true;
+    mVSync.signal();
+  }
+
+  /**
+   * Stage 3: Read out latest captured image
+   */
+
+  Buffers *capturedBuffers = NULL;
+  nsecs_t captureTime = 0;
+
+  nsecs_t startRealTime = systemTime();
+  // Stagefright cares about system time for timestamps, so base simulated
+  // time on that.
+  nsecs_t simulatedTime = startRealTime;
+  nsecs_t frameEndRealTime = startRealTime + frameDuration;
+  nsecs_t frameReadoutEndRealTime =
+      startRealTime + mRowReadoutTime * mResolution[1];
+
+  if (mNextCapturedBuffers != NULL) {
+    ALOGVV("Sensor starting readout");
+    // Pretend we're doing readout now; will signal once enough time has elapsed
+    capturedBuffers = mNextCapturedBuffers;
+    captureTime = mNextCaptureTime;
+  }
+  simulatedTime += mRowReadoutTime + kMinVerticalBlank;
+
+  // TODO: Move this signal to another thread to simulate readout
+  // time properly
+  if (capturedBuffers != NULL) {
+    ALOGVV("Sensor readout complete");
+    Mutex::Autolock lock(mReadoutMutex);
+    if (mCapturedBuffers != NULL) {
+      ALOGV("Waiting for readout thread to catch up!");
+      mReadoutComplete.wait(mReadoutMutex);
     }
 
-    /**
-     * Stage 3: Read out latest captured image
-     */
+    mCapturedBuffers = capturedBuffers;
+    mCaptureTime = captureTime;
+    mReadoutAvailable.signal();
+    capturedBuffers = NULL;
+  }
 
-    Buffers *capturedBuffers = NULL;
-    nsecs_t captureTime = 0;
+  /**
+   * Stage 2: Capture new image
+   */
+  mNextCaptureTime = simulatedTime;
+  mNextCapturedBuffers = nextBuffers;
 
-    nsecs_t startRealTime  = systemTime();
-    // Stagefright cares about system time for timestamps, so base simulated
-    // time on that.
-    nsecs_t simulatedTime    = startRealTime;
-    nsecs_t frameEndRealTime = startRealTime + frameDuration;
-    nsecs_t frameReadoutEndRealTime = startRealTime +
-            mRowReadoutTime * mResolution[1];
-
-    if (mNextCapturedBuffers != NULL) {
-        ALOGVV("Sensor starting readout");
-        // Pretend we're doing readout now; will signal once enough time has elapsed
-        capturedBuffers = mNextCapturedBuffers;
-        captureTime    = mNextCaptureTime;
+  if (mNextCapturedBuffers != NULL) {
+    if (listener != NULL) {
+      listener->onSensorEvent(frameNumber, SensorListener::EXPOSURE_START,
+                              mNextCaptureTime);
     }
-    simulatedTime += mRowReadoutTime + kMinVerticalBlank;
+    ALOGVV("Starting next capture: Exposure: %f ms, gain: %d",
+           (float)exposureDuration / 1e6, gain);
+    mScene.setExposureDuration((float)exposureDuration / 1e9);
+    mScene.calculateScene(mNextCaptureTime);
 
-    // TODO: Move this signal to another thread to simulate readout
-    // time properly
-    if (capturedBuffers != NULL) {
-        ALOGVV("Sensor readout complete");
-        Mutex::Autolock lock(mReadoutMutex);
-        if (mCapturedBuffers != NULL) {
-            ALOGV("Waiting for readout thread to catch up!");
-            mReadoutComplete.wait(mReadoutMutex);
-        }
-
-        mCapturedBuffers = capturedBuffers;
-        mCaptureTime = captureTime;
-        mReadoutAvailable.signal();
-        capturedBuffers = NULL;
-    }
-
-    /**
-     * Stage 2: Capture new image
-     */
-    mNextCaptureTime = simulatedTime;
-    mNextCapturedBuffers = nextBuffers;
-
-    if (mNextCapturedBuffers != NULL) {
-        if (listener != NULL) {
-            listener->onSensorEvent(frameNumber, SensorListener::EXPOSURE_START,
-                    mNextCaptureTime);
-        }
-        ALOGVV("Starting next capture: Exposure: %f ms, gain: %d",
-                (float)exposureDuration/1e6, gain);
-        mScene.setExposureDuration((float)exposureDuration/1e9);
-        mScene.calculateScene(mNextCaptureTime);
-
-        // Might be adding more buffers, so size isn't constant
-        for (size_t i = 0; i < mNextCapturedBuffers->size(); i++) {
-            const StreamBuffer &b = (*mNextCapturedBuffers)[i];
-            ALOGVV("Sensor capturing buffer %d: stream %d,"
-                    " %d x %d, format %x, stride %d, buf %p, img %p",
-                    i, b.streamId, b.width, b.height, b.format, b.stride,
-                    b.buffer, b.img);
-            switch(b.format) {
+    // Might be adding more buffers, so size isn't constant
+    for (size_t i = 0; i < mNextCapturedBuffers->size(); i++) {
+      const StreamBuffer &b = (*mNextCapturedBuffers)[i];
+      ALOGVV(
+          "Sensor capturing buffer %d: stream %d,"
+          " %d x %d, format %x, stride %d, buf %p, img %p",
+          i, b.streamId, b.width, b.height, b.format, b.stride, b.buffer,
+          b.img);
+      switch (b.format) {
 #if VSOC_PLATFORM_SDK_AFTER(K)
-                case HAL_PIXEL_FORMAT_RAW16:
-                    captureRaw(b.img, gain, b.stride);
-                    break;
+        case HAL_PIXEL_FORMAT_RAW16:
+          captureRaw(b.img, gain, b.stride);
+          break;
 #endif
-                case HAL_PIXEL_FORMAT_RGB_888:
-                    captureRGB(b.img, gain, b.stride);
-                    break;
-                case HAL_PIXEL_FORMAT_RGBA_8888:
-                    captureRGBA(b.img, gain, b.stride);
-                    break;
-                case HAL_PIXEL_FORMAT_BLOB:
+        case HAL_PIXEL_FORMAT_RGB_888:
+          captureRGB(b.img, gain, b.stride);
+          break;
+        case HAL_PIXEL_FORMAT_RGBA_8888:
+          captureRGBA(b.img, gain, b.stride);
+          break;
+        case HAL_PIXEL_FORMAT_BLOB:
 #if defined HAL_DATASPACE_DEPTH
-                    if (b.dataSpace != HAL_DATASPACE_DEPTH) {
+          if (b.dataSpace != HAL_DATASPACE_DEPTH) {
 #endif
-                        // Add auxillary buffer of the right size
-                        // Assumes only one BLOB (JPEG) buffer in
-                        // mNextCapturedBuffers
-                        StreamBuffer bAux;
-                        bAux.streamId = 0;
-                        bAux.width = b.width;
-                        bAux.height = b.height;
-                        bAux.format = HAL_PIXEL_FORMAT_RGB_888;
-                        bAux.stride = b.width;
-                        bAux.buffer = NULL;
-                        // TODO: Reuse these
-                        bAux.img = new uint8_t[b.width * b.height * 3];
-                        mNextCapturedBuffers->push_back(bAux);
+            // Add auxiliary buffer of the right size
+            // Assumes only one BLOB (JPEG) buffer in
+            // mNextCapturedBuffers
+            StreamBuffer bAux;
+            bAux.streamId = 0;
+            bAux.width = b.width;
+            bAux.height = b.height;
+            bAux.format = HAL_PIXEL_FORMAT_RGB_888;
+            bAux.stride = b.width;
+            bAux.buffer = NULL;
+            // TODO: Reuse these
+            bAux.img = new uint8_t[b.width * b.height * 3];
+            mNextCapturedBuffers->push_back(bAux);
 #if defined HAL_DATASPACE_DEPTH
-                    } else {
-                        captureDepthCloud(b.img);
-                    }
+          } else {
+            captureDepthCloud(b.img);
+          }
 #endif
-                    break;
-                case HAL_PIXEL_FORMAT_YCrCb_420_SP:
-                case HAL_PIXEL_FORMAT_YCbCr_420_888:
-                    captureNV21(b.img, gain, b.stride);
-                    break;
-                case HAL_PIXEL_FORMAT_YV12:
-                    // TODO:
-                    ALOGE("%s: Format %x is TODO", __FUNCTION__, b.format);
-                    break;
-                case HAL_PIXEL_FORMAT_Y16:
-                    captureDepth(b.img, gain, b.stride);
-                    break;
-                default:
-                    ALOGE("%s: Unknown format %x, no output", __FUNCTION__,
-                            b.format);
-                    break;
-            }
-        }
+          break;
+        case HAL_PIXEL_FORMAT_YCrCb_420_SP:
+        case HAL_PIXEL_FORMAT_YCbCr_420_888:
+          captureNV21(b.img, gain, b.stride);
+          break;
+        case HAL_PIXEL_FORMAT_YV12:
+          // TODO:
+          ALOGE("%s: Format %x is TODO", __FUNCTION__, b.format);
+          break;
+        case HAL_PIXEL_FORMAT_Y16:
+          captureDepth(b.img, gain, b.stride);
+          break;
+        default:
+          ALOGE("%s: Unknown format %x, no output", __FUNCTION__, b.format);
+          break;
+      }
     }
+  }
 
-    ALOGVV("Sensor vertical blanking interval");
-    nsecs_t workDoneRealTime = systemTime();
-    const nsecs_t timeAccuracy = 2e6; // 2 ms of imprecision is ok
-    if (workDoneRealTime < frameEndRealTime - timeAccuracy) {
-        timespec t;
-        t.tv_sec = (frameEndRealTime - workDoneRealTime)  / 1000000000L;
-        t.tv_nsec = (frameEndRealTime - workDoneRealTime) % 1000000000L;
+  ALOGVV("Sensor vertical blanking interval");
+  nsecs_t workDoneRealTime = systemTime();
+  const nsecs_t timeAccuracy = 2e6;  // 2 ms of imprecision is ok
+  if (workDoneRealTime < frameEndRealTime - timeAccuracy) {
+    timespec t;
+    t.tv_sec = (frameEndRealTime - workDoneRealTime) / 1000000000L;
+    t.tv_nsec = (frameEndRealTime - workDoneRealTime) % 1000000000L;
 
-        int ret;
-        do {
-            ret = nanosleep(&t, &t);
-        } while (ret != 0);
-    }
-    nsecs_t endRealTime = systemTime();
-    ALOGVV("Frame cycle took %d ms, target %d ms",
-            (int)((endRealTime - startRealTime)/1000000),
-            (int)(frameDuration / 1000000));
-    return true;
+    int ret;
+    do {
+      ret = nanosleep(&t, &t);
+    } while (ret != 0);
+  }
+  nsecs_t endRealTime = systemTime();
+  ALOGVV("Frame cycle took %d ms, target %d ms",
+         (int)((endRealTime - startRealTime) / 1000000),
+         (int)(frameDuration / 1000000));
+  return true;
 };
 
 void Sensor::captureRaw(uint8_t *img, uint32_t gain, uint32_t stride) {
-    float totalGain = gain/100.0 * kBaseGainFactor;
-    float noiseVarGain =  totalGain * totalGain;
-    float readNoiseVar = kReadNoiseVarBeforeGain * noiseVarGain
-            + kReadNoiseVarAfterGain;
+  float totalGain = gain / 100.0 * kBaseGainFactor;
+  float noiseVarGain = totalGain * totalGain;
+  float readNoiseVar =
+      kReadNoiseVarBeforeGain * noiseVarGain + kReadNoiseVarAfterGain;
 
-    int bayerSelect[4] = {Scene::R, Scene::Gr, Scene::Gb, Scene::B}; // RGGB
-    mScene.setReadoutPixel(0,0);
-    for (unsigned int y = 0; y < mResolution[1]; y++ ) {
-        int *bayerRow = bayerSelect + (y & 0x1) * 2;
-        uint16_t *px = (uint16_t*)img + y * stride;
-        for (unsigned int x = 0; x < mResolution[0]; x++) {
-            uint32_t electronCount;
-            electronCount = mScene.getPixelElectrons()[bayerRow[x & 0x1]];
+  int bayerSelect[4] = {Scene::R, Scene::Gr, Scene::Gb, Scene::B};  // RGGB
+  mScene.setReadoutPixel(0, 0);
+  for (unsigned int y = 0; y < mResolution[1]; y++) {
+    int *bayerRow = bayerSelect + (y & 0x1) * 2;
+    uint16_t *px = (uint16_t *)img + y * stride;
+    for (unsigned int x = 0; x < mResolution[0]; x++) {
+      uint32_t electronCount;
+      electronCount = mScene.getPixelElectrons()[bayerRow[x & 0x1]];
 
-            // TODO: Better pixel saturation curve?
-            electronCount = (electronCount < kSaturationElectrons) ?
-                    electronCount : kSaturationElectrons;
+      // TODO: Better pixel saturation curve?
+      electronCount = (electronCount < kSaturationElectrons)
+                          ? electronCount
+                          : kSaturationElectrons;
 
-            // TODO: Better A/D saturation curve?
-            uint16_t rawCount = electronCount * totalGain;
-            rawCount = (rawCount < kMaxRawValue) ? rawCount : kMaxRawValue;
+      // TODO: Better A/D saturation curve?
+      uint16_t rawCount = electronCount * totalGain;
+      rawCount = (rawCount < kMaxRawValue) ? rawCount : kMaxRawValue;
 
-            // Calculate noise value
-            // TODO: Use more-correct Gaussian instead of uniform noise
-            float photonNoiseVar = electronCount * noiseVarGain;
-            float noiseStddev = sqrtf_approx(readNoiseVar + photonNoiseVar);
-            // Scaled to roughly match gaussian/uniform noise stddev
-            float noiseSample = std::rand() * (2.5 / (1.0 + RAND_MAX)) - 1.25;
+      // Calculate noise value
+      // TODO: Use more-correct Gaussian instead of uniform noise
+      float photonNoiseVar = electronCount * noiseVarGain;
+      float noiseStddev = sqrtf_approx(readNoiseVar + photonNoiseVar);
+      // Scaled to roughly match gaussian/uniform noise stddev
+      float noiseSample = std::rand() * (2.5 / (1.0 + RAND_MAX)) - 1.25;
 
-            rawCount += kBlackLevel;
-            rawCount += noiseStddev * noiseSample;
+      rawCount += kBlackLevel;
+      rawCount += noiseStddev * noiseSample;
 
-            *px++ = rawCount;
-        }
-        // TODO: Handle this better
-        //simulatedTime += mRowReadoutTime;
+      *px++ = rawCount;
     }
-    ALOGVV("Raw sensor image captured");
+    // TODO: Handle this better
+    // simulatedTime += mRowReadoutTime;
+  }
+  ALOGVV("Raw sensor image captured");
 }
 
 void Sensor::captureRGBA(uint8_t *img, uint32_t gain, uint32_t stride) {
-    float totalGain = gain/100.0 * kBaseGainFactor;
-    // In fixed-point math, calculate total scaling from electrons to 8bpp
-    int scale64x = 64 * totalGain * 255 / kMaxRawValue;
-    uint32_t inc = ceil( (float) mResolution[0] / stride);
+  float totalGain = gain / 100.0 * kBaseGainFactor;
+  // In fixed-point math, calculate total scaling from electrons to 8bpp
+  int scale64x = 64 * totalGain * 255 / kMaxRawValue;
+  uint32_t inc = ceil((float)mResolution[0] / stride);
 
-    for (unsigned int y = 0, outY = 0; y < mResolution[1]; y+=inc, outY++ ) {
-        uint8_t *px = img + outY * stride * 4;
-        mScene.setReadoutPixel(0, y);
-        for (unsigned int x = 0; x < mResolution[0]; x+=inc) {
-            uint32_t rCount, gCount, bCount;
-            // TODO: Perfect demosaicing is a cheat
-            const uint32_t *pixel = mScene.getPixelElectrons();
-            rCount = pixel[Scene::R]  * scale64x;
-            gCount = pixel[Scene::Gr] * scale64x;
-            bCount = pixel[Scene::B]  * scale64x;
+  for (unsigned int y = 0, outY = 0; y < mResolution[1]; y += inc, outY++) {
+    uint8_t *px = img + outY * stride * 4;
+    mScene.setReadoutPixel(0, y);
+    for (unsigned int x = 0; x < mResolution[0]; x += inc) {
+      uint32_t rCount, gCount, bCount;
+      // TODO: Perfect demosaicing is a cheat
+      const uint32_t *pixel = mScene.getPixelElectrons();
+      rCount = pixel[Scene::R] * scale64x;
+      gCount = pixel[Scene::Gr] * scale64x;
+      bCount = pixel[Scene::B] * scale64x;
 
-            *px++ = rCount < 255*64 ? rCount / 64 : 255;
-            *px++ = gCount < 255*64 ? gCount / 64 : 255;
-            *px++ = bCount < 255*64 ? bCount / 64 : 255;
-            *px++ = 255;
-            for (unsigned int j = 1; j < inc; j++)
-                mScene.getPixelElectrons();
-        }
-        // TODO: Handle this better
-        //simulatedTime += mRowReadoutTime;
+      *px++ = rCount < 255 * 64 ? rCount / 64 : 255;
+      *px++ = gCount < 255 * 64 ? gCount / 64 : 255;
+      *px++ = bCount < 255 * 64 ? bCount / 64 : 255;
+      *px++ = 255;
+      for (unsigned int j = 1; j < inc; j++) mScene.getPixelElectrons();
     }
-    ALOGVV("RGBA sensor image captured");
+    // TODO: Handle this better
+    // simulatedTime += mRowReadoutTime;
+  }
+  ALOGVV("RGBA sensor image captured");
 }
 
 void Sensor::captureRGB(uint8_t *img, uint32_t gain, uint32_t stride) {
-    float totalGain = gain/100.0 * kBaseGainFactor;
-    // In fixed-point math, calculate total scaling from electrons to 8bpp
-    int scale64x = 64 * totalGain * 255 / kMaxRawValue;
-    uint32_t inc = ceil( (float) mResolution[0] / stride);
+  float totalGain = gain / 100.0 * kBaseGainFactor;
+  // In fixed-point math, calculate total scaling from electrons to 8bpp
+  int scale64x = 64 * totalGain * 255 / kMaxRawValue;
+  uint32_t inc = ceil((float)mResolution[0] / stride);
 
-    for (unsigned int y = 0, outY = 0; y < mResolution[1]; y += inc, outY++ ) {
-        mScene.setReadoutPixel(0, y);
-        uint8_t *px = img + outY * stride * 3;
-        for (unsigned int x = 0; x < mResolution[0]; x += inc) {
-            uint32_t rCount, gCount, bCount;
-            // TODO: Perfect demosaicing is a cheat
-            const uint32_t *pixel = mScene.getPixelElectrons();
-            rCount = pixel[Scene::R]  * scale64x;
-            gCount = pixel[Scene::Gr] * scale64x;
-            bCount = pixel[Scene::B]  * scale64x;
+  for (unsigned int y = 0, outY = 0; y < mResolution[1]; y += inc, outY++) {
+    mScene.setReadoutPixel(0, y);
+    uint8_t *px = img + outY * stride * 3;
+    for (unsigned int x = 0; x < mResolution[0]; x += inc) {
+      uint32_t rCount, gCount, bCount;
+      // TODO: Perfect demosaicing is a cheat
+      const uint32_t *pixel = mScene.getPixelElectrons();
+      rCount = pixel[Scene::R] * scale64x;
+      gCount = pixel[Scene::Gr] * scale64x;
+      bCount = pixel[Scene::B] * scale64x;
 
-            *px++ = rCount < 255*64 ? rCount / 64 : 255;
-            *px++ = gCount < 255*64 ? gCount / 64 : 255;
-            *px++ = bCount < 255*64 ? bCount / 64 : 255;
-            for (unsigned int j = 1; j < inc; j++)
-                mScene.getPixelElectrons();
-        }
-        // TODO: Handle this better
-        //simulatedTime += mRowReadoutTime;
+      *px++ = rCount < 255 * 64 ? rCount / 64 : 255;
+      *px++ = gCount < 255 * 64 ? gCount / 64 : 255;
+      *px++ = bCount < 255 * 64 ? bCount / 64 : 255;
+      for (unsigned int j = 1; j < inc; j++) mScene.getPixelElectrons();
     }
-    ALOGVV("RGB sensor image captured");
+    // TODO: Handle this better
+    // simulatedTime += mRowReadoutTime;
+  }
+  ALOGVV("RGB sensor image captured");
 }
 
 void Sensor::captureNV21(uint8_t *img, uint32_t gain, uint32_t stride) {
-    float totalGain = gain/100.0 * kBaseGainFactor;
-    // Using fixed-point math with 6 bits of fractional precision.
-    // In fixed-point math, calculate total scaling from electrons to 8bpp
-    const int scale64x = 64 * totalGain * 255 / kMaxRawValue;
-    // In fixed-point math, saturation point of sensor after gain
-    const int saturationPoint = 64 * 255;
-    // Fixed-point coefficients for RGB-YUV transform
-    // Based on JFIF RGB->YUV transform.
-    // Cb/Cr offset scaled by 64x twice since they're applied post-multiply
-    const int rgbToY[]  = {19, 37, 7};
-    const int rgbToCb[] = {-10,-21, 32, 524288};
-    const int rgbToCr[] = {32,-26, -5, 524288};
-    // Scale back to 8bpp non-fixed-point
-    const int scaleOut = 64;
-    const int scaleOutSq = scaleOut * scaleOut; // after multiplies
+  float totalGain = gain / 100.0 * kBaseGainFactor;
+  // Using fixed-point math with 6 bits of fractional precision.
+  // In fixed-point math, calculate total scaling from electrons to 8bpp
+  const int scale64x = 64 * totalGain * 255 / kMaxRawValue;
+  // In fixed-point math, saturation point of sensor after gain
+  const int saturationPoint = 64 * 255;
+  // Fixed-point coefficients for RGB-YUV transform
+  // Based on JFIF RGB->YUV transform.
+  // Cb/Cr offset scaled by 64x twice since they're applied post-multiply
+  const int rgbToY[] = {19, 37, 7};
+  const int rgbToCb[] = {-10, -21, 32, 524288};
+  const int rgbToCr[] = {32, -26, -5, 524288};
+  // Scale back to 8bpp non-fixed-point
+  const int scaleOut = 64;
+  const int scaleOutSq = scaleOut * scaleOut;  // after multiplies
 
-    // inc = how many pixels to skip while reading every next pixel
-    // horizontally.
-    uint32_t inc = ceil( (float) mResolution[0] / stride);
-    // outH = projected vertical resolution based on stride.
-    uint32_t outH = mResolution[1] / inc;
-    for (unsigned int y = 0, outY = 0;
-         y < mResolution[1]; y+=inc, outY++) {
-        uint8_t *pxY = img + outY * stride;
-        uint8_t *pxVU = img + (outH + outY / 2) * stride;
-        mScene.setReadoutPixel(0,y);
-        for (unsigned int outX = 0; outX < stride; outX++) {
-            int32_t rCount, gCount, bCount;
-            // TODO: Perfect demosaicing is a cheat
-            const uint32_t *pixel = mScene.getPixelElectrons();
-            rCount = pixel[Scene::R]  * scale64x;
-            rCount = rCount < saturationPoint ? rCount : saturationPoint;
-            gCount = pixel[Scene::Gr] * scale64x;
-            gCount = gCount < saturationPoint ? gCount : saturationPoint;
-            bCount = pixel[Scene::B]  * scale64x;
-            bCount = bCount < saturationPoint ? bCount : saturationPoint;
+  // inc = horizontal subsampling step: one sensor pixel is read out of
+  // every 'inc' so the output matches the stride.
+  uint32_t inc = ceil((float)mResolution[0] / stride);
+  // outH = projected vertical resolution based on stride.
+  uint32_t outH = mResolution[1] / inc;
+  for (unsigned int y = 0, outY = 0; y < mResolution[1]; y += inc, outY++) {
+    uint8_t *pxY = img + outY * stride;
+    uint8_t *pxVU = img + (outH + outY / 2) * stride;
+    mScene.setReadoutPixel(0, y);
+    for (unsigned int outX = 0; outX < stride; outX++) {
+      int32_t rCount, gCount, bCount;
+      // TODO: Perfect demosaicing is a cheat
+      const uint32_t *pixel = mScene.getPixelElectrons();
+      rCount = pixel[Scene::R] * scale64x;
+      rCount = rCount < saturationPoint ? rCount : saturationPoint;
+      gCount = pixel[Scene::Gr] * scale64x;
+      gCount = gCount < saturationPoint ? gCount : saturationPoint;
+      bCount = pixel[Scene::B] * scale64x;
+      bCount = bCount < saturationPoint ? bCount : saturationPoint;
 
-            *pxY++ = (rgbToY[0] * rCount +
-                    rgbToY[1] * gCount +
-                    rgbToY[2] * bCount) / scaleOutSq;
-            if (outY % 2 == 0 && outX % 2 == 0) {
-                *pxVU++ = (rgbToCb[0] * rCount +
-                        rgbToCb[1] * gCount +
-                        rgbToCb[2] * bCount +
-                        rgbToCb[3]) / scaleOutSq;
-                *pxVU++ = (rgbToCr[0] * rCount +
-                        rgbToCr[1] * gCount +
-                        rgbToCr[2] * bCount +
-                        rgbToCr[3]) / scaleOutSq;
-            }
+      *pxY++ = (rgbToY[0] * rCount + rgbToY[1] * gCount + rgbToY[2] * bCount) /
+               scaleOutSq;
+      if (outY % 2 == 0 && outX % 2 == 0) {
+        *pxVU++ = (rgbToCb[0] * rCount + rgbToCb[1] * gCount +
+                   rgbToCb[2] * bCount + rgbToCb[3]) /
+                  scaleOutSq;
+        *pxVU++ = (rgbToCr[0] * rCount + rgbToCr[1] * gCount +
+                   rgbToCr[2] * bCount + rgbToCr[3]) /
+                  scaleOutSq;
+      }
 
-            // Skip unprocessed pixels from sensor.
-            for (unsigned int j = 1; j < inc; j++)
-                mScene.getPixelElectrons();
-        }
+      // Skip unprocessed pixels from sensor.
+      for (unsigned int j = 1; j < inc; j++) mScene.getPixelElectrons();
     }
-    ALOGVV("NV21 sensor image captured");
+  }
+  ALOGVV("NV21 sensor image captured");
 }
 
 void Sensor::captureDepth(uint8_t *img, uint32_t gain, uint32_t stride) {
-    float totalGain = gain/100.0 * kBaseGainFactor;
-    // In fixed-point math, calculate scaling factor to 13bpp millimeters
-    int scale64x = 64 * totalGain * 8191 / kMaxRawValue;
-    uint32_t inc = ceil( (float) mResolution[0] / stride);
+  float totalGain = gain / 100.0 * kBaseGainFactor;
+  // In fixed-point math, calculate scaling factor to 13bpp millimeters
+  int scale64x = 64 * totalGain * 8191 / kMaxRawValue;
+  uint32_t inc = ceil((float)mResolution[0] / stride);
 
-    for (unsigned int y = 0, outY = 0; y < mResolution[1]; y += inc, outY++ ) {
-        mScene.setReadoutPixel(0, y);
-        uint16_t *px = ((uint16_t*)img) + outY * stride;
-        for (unsigned int x = 0; x < mResolution[0]; x += inc) {
-            uint32_t depthCount;
-            // TODO: Make up real depth scene instead of using green channel
-            // as depth
-            const uint32_t *pixel = mScene.getPixelElectrons();
-            depthCount = pixel[Scene::Gr] * scale64x;
+  for (unsigned int y = 0, outY = 0; y < mResolution[1]; y += inc, outY++) {
+    mScene.setReadoutPixel(0, y);
+    uint16_t *px = ((uint16_t *)img) + outY * stride;
+    for (unsigned int x = 0; x < mResolution[0]; x += inc) {
+      uint32_t depthCount;
+      // TODO: Make up real depth scene instead of using green channel
+      // as depth
+      const uint32_t *pixel = mScene.getPixelElectrons();
+      depthCount = pixel[Scene::Gr] * scale64x;
 
-            *px++ = depthCount < 8191*64 ? depthCount / 64 : 0;
-            for (unsigned int j = 1; j < inc; j++)
-                mScene.getPixelElectrons();
-        }
-        // TODO: Handle this better
-        //simulatedTime += mRowReadoutTime;
+      *px++ = depthCount < 8191 * 64 ? depthCount / 64 : 0;
+      for (unsigned int j = 1; j < inc; j++) mScene.getPixelElectrons();
     }
-    ALOGVV("Depth sensor image captured");
+    // TODO: Handle this better
+    // simulatedTime += mRowReadoutTime;
+  }
+  ALOGVV("Depth sensor image captured");
 }
 
 void Sensor::captureDepthCloud(uint8_t *img) {
 #if defined HAL_DATASPACE_DEPTH
-    android_depth_points *cloud = reinterpret_cast<android_depth_points*>(img);
+  android_depth_points *cloud = reinterpret_cast<android_depth_points *>(img);
 
-    cloud->num_points = 16;
+  cloud->num_points = 16;
 
-    // TODO: Create point cloud values that match RGB scene
-    const int FLOATS_PER_POINT = 4;
-    const float JITTER_STDDEV = 0.1f;
-    for (size_t y = 0, i = 0; y < 4; y++) {
-        for (size_t x = 0; x < 4; x++, i++) {
-            float randSampleX = std::rand() * (2.5f / (1.0f + RAND_MAX)) - 1.25f;
-            randSampleX *= JITTER_STDDEV;
+  // TODO: Create point cloud values that match RGB scene
+  const int FLOATS_PER_POINT = 4;
+  const float JITTER_STDDEV = 0.1f;
+  for (size_t y = 0, i = 0; y < 4; y++) {
+    for (size_t x = 0; x < 4; x++, i++) {
+      float randSampleX = std::rand() * (2.5f / (1.0f + RAND_MAX)) - 1.25f;
+      randSampleX *= JITTER_STDDEV;
 
-            float randSampleY = std::rand() * (2.5f / (1.0f + RAND_MAX)) - 1.25f;
-            randSampleY *= JITTER_STDDEV;
+      float randSampleY = std::rand() * (2.5f / (1.0f + RAND_MAX)) - 1.25f;
+      randSampleY *= JITTER_STDDEV;
 
-            float randSampleZ = std::rand() * (2.5f / (1.0f + RAND_MAX)) - 1.25f;
-            randSampleZ *= JITTER_STDDEV;
+      float randSampleZ = std::rand() * (2.5f / (1.0f + RAND_MAX)) - 1.25f;
+      randSampleZ *= JITTER_STDDEV;
 
-            cloud->xyzc_points[i * FLOATS_PER_POINT + 0] = x - 1.5f + randSampleX;
-            cloud->xyzc_points[i * FLOATS_PER_POINT + 1] = y - 1.5f + randSampleY;
-            cloud->xyzc_points[i * FLOATS_PER_POINT + 2] = 3.f + randSampleZ;
-            cloud->xyzc_points[i * FLOATS_PER_POINT + 3] = 0.8f;
-        }
+      cloud->xyzc_points[i * FLOATS_PER_POINT + 0] = x - 1.5f + randSampleX;
+      cloud->xyzc_points[i * FLOATS_PER_POINT + 1] = y - 1.5f + randSampleY;
+      cloud->xyzc_points[i * FLOATS_PER_POINT + 2] = 3.f + randSampleZ;
+      cloud->xyzc_points[i * FLOATS_PER_POINT + 3] = 0.8f;
     }
+  }
 
-    ALOGVV("Depth point cloud captured");
+  ALOGVV("Depth point cloud captured");
 #endif
 }
 
-} // namespace android
+}  // namespace android
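
For reference, the fixed-point constants reshuffled in the captureNV21 hunk
above appear to be the JFIF RGB->YUV weights scaled by 64 (6 fractional bits)
and truncated: 0.299/0.587/0.114 * 64 gives {19, 37, 7}, and the Cb/Cr offset
of 128 becomes 128 * 64 * 64 = 524288 because, as the code comment notes, it
is applied after two 64x-scaled factors have already been multiplied together.
A minimal standalone sketch checking that fixed-point path against the
floating-point JFIF transform (illustrative only, not part of this patch):

#include <cstdio>

int main() {
  // One 8-bit test pixel, standing in for a clamped rCount/gCount/bCount.
  const int r = 200, g = 120, b = 40;

  // Floating-point JFIF transform, for comparison.
  const float yF = 0.299f * r + 0.587f * g + 0.114f * b;
  const float cbF = -0.1687f * r - 0.3313f * g + 0.5f * b + 128.f;
  const float crF = 0.5f * r - 0.4187f * g - 0.0813f * b + 128.f;

  // Fixed-point path as in Sensor::captureNV21: the pixel values carry one
  // 64x scale (rCount = pixel * scale64x) and the coefficients another, so
  // each product is divided by scaleOutSq = 64 * 64 at the end.
  const int rgbToY[] = {19, 37, 7};
  const int rgbToCb[] = {-10, -21, 32, 524288};  // 524288 = 128 * 64 * 64
  const int rgbToCr[] = {32, -26, -5, 524288};
  const int scaleOutSq = 64 * 64;
  const int rC = r * 64, gC = g * 64, bC = b * 64;

  int y = (rgbToY[0] * rC + rgbToY[1] * gC + rgbToY[2] * bC) / scaleOutSq;
  int cb = (rgbToCb[0] * rC + rgbToCb[1] * gC + rgbToCb[2] * bC + rgbToCb[3]) /
           scaleOutSq;
  int cr = (rgbToCr[0] * rC + rgbToCr[1] * gC + rgbToCr[2] * bC + rgbToCr[3]) /
           scaleOutSq;

  std::printf("float JFIF:  Y=%5.1f Cb=%5.1f Cr=%5.1f\n", yF, cbF, crF);
  std::printf("fixed-point: Y=%5d Cb=%5d Cr=%5d\n", y, cb, cr);
  return 0;
}

Because the coefficients are truncated rather than rounded, the luma weights
sum to 63/64 instead of 1, so the fixed-point output sits slightly below the
floating-point value; for an 8-bit preview path that error is negligible.
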
diff --git a/guest/hals/camera/fake-pipeline2/Sensor.h b/guest/hals/camera/fake-pipeline2/Sensor.h
index cdf1e97..326af29 100644
--- a/guest/hals/camera/fake-pipeline2/Sensor.h
+++ b/guest/hals/camera/fake-pipeline2/Sensor.h
@@ -75,173 +75,170 @@
 #ifndef HW_EMULATOR_CAMERA2_SENSOR_H
 #define HW_EMULATOR_CAMERA2_SENSOR_H
 
-#include "utils/Thread.h"
 #include "utils/Mutex.h"
+#include "utils/Thread.h"
 #include "utils/Timers.h"
 
-#include "Scene.h"
 #include "Base.h"
+#include "Scene.h"
 
 namespace android {
 
 class EmulatedFakeCamera2;
 
-class Sensor: private Thread, public virtual RefBase {
-  public:
+class Sensor : private Thread, public virtual RefBase {
+ public:
+  // width: Width of pixel array
+  // height: Height of pixel array
+  Sensor(uint32_t width, uint32_t height);
+  ~Sensor();
 
-    // width: Width of pixel array
-    // height: Height of pixel array
-    Sensor(uint32_t width, uint32_t height);
-    ~Sensor();
+  /*
+   * Power control
+   */
 
-    /*
-     * Power control
-     */
+  status_t startUp();
+  status_t shutDown();
 
-    status_t startUp();
-    status_t shutDown();
+  /*
+   * Access to scene
+   */
+  Scene &getScene();
 
-    /*
-     * Access to scene
-     */
-    Scene &getScene();
+  /*
+   * Controls that can be updated every frame
+   */
 
-    /*
-     * Controls that can be updated every frame
-     */
+  void setExposureTime(uint64_t ns);
+  void setFrameDuration(uint64_t ns);
+  void setSensitivity(uint32_t gain);
+  // Buffer must be at least stride*height*2 bytes in size
+  void setDestinationBuffers(Buffers *buffers);
+  // To simplify tracking sensor's current frame
+  void setFrameNumber(uint32_t frameNumber);
 
-    void setExposureTime(uint64_t ns);
-    void setFrameDuration(uint64_t ns);
-    void setSensitivity(uint32_t gain);
-    // Buffer must be at least stride*height*2 bytes in size
-    void setDestinationBuffers(Buffers *buffers);
-    // To simplify tracking sensor's current frame
-    void setFrameNumber(uint32_t frameNumber);
+  /*
+   * Controls that cause reconfiguration delay
+   */
 
-    /*
-     * Controls that cause reconfiguration delay
-     */
+  void setBinning(int horizontalFactor, int verticalFactor);
 
-    void setBinning(int horizontalFactor, int verticalFactor);
+  /*
+   * Synchronizing with sensor operation (vertical sync)
+   */
 
-    /*
-     * Synchronizing with sensor operation (vertical sync)
-     */
+  // Wait until the sensor outputs its next vertical sync signal, meaning it
+  // is starting readout of its latest frame of data. Returns true if vertical
+  // sync is signaled, false if the wait timed out.
+  bool waitForVSync(nsecs_t reltime);
 
-    // Wait until the sensor outputs its next vertical sync signal, meaning it
-    // is starting readout of its latest frame of data. Returns true if vertical
-    // sync is signaled, false if the wait timed out.
-    bool waitForVSync(nsecs_t reltime);
+  // Wait until a new frame has been read out, and then return the time
+  // capture started.  May return immediately if a new frame has been pushed
+  // since the last wait for a new frame. Returns true if new frame is
+  // returned, false if timed out.
+  bool waitForNewFrame(nsecs_t reltime, nsecs_t *captureTime);
 
-    // Wait until a new frame has been read out, and then return the time
-    // capture started.  May return immediately if a new frame has been pushed
-    // since the last wait for a new frame. Returns true if new frame is
-    // returned, false if timed out.
-    bool waitForNewFrame(nsecs_t reltime,
-            nsecs_t *captureTime);
-
-    /*
-     * Interrupt event servicing from the sensor. Only triggers for sensor
-     * cycles that have valid buffers to write to.
-     */
-    struct SensorListener {
-        enum Event {
-            EXPOSURE_START, // Start of exposure
-        };
-
-        virtual void onSensorEvent(uint32_t frameNumber, Event e,
-                nsecs_t timestamp) = 0;
-        virtual ~SensorListener();
+  /*
+   * Interrupt event servicing from the sensor. Only triggers for sensor
+   * cycles that have valid buffers to write to.
+   */
+  struct SensorListener {
+    enum Event {
+      EXPOSURE_START,  // Start of exposure
     };
 
-    void setSensorListener(SensorListener *listener);
+    virtual void onSensorEvent(uint32_t frameNumber, Event e,
+                               nsecs_t timestamp) = 0;
+    virtual ~SensorListener();
+  };
 
-    /**
-     * Static sensor characteristics
-     */
-    const uint32_t mResolution[2];
-    const uint32_t mActiveArray[4];
+  void setSensorListener(SensorListener *listener);
 
-    static const nsecs_t kExposureTimeRange[2];
-    static const nsecs_t kFrameDurationRange[2];
-    static const nsecs_t kMinVerticalBlank;
+  /**
+   * Static sensor characteristics
+   */
+  const uint32_t mResolution[2];
+  const uint32_t mActiveArray[4];
 
-    static const uint8_t kColorFilterArrangement;
+  static const nsecs_t kExposureTimeRange[2];
+  static const nsecs_t kFrameDurationRange[2];
+  static const nsecs_t kMinVerticalBlank;
 
-    // Output image data characteristics
-    static const uint32_t kMaxRawValue;
-    static const uint32_t kBlackLevel;
-    // Sensor sensitivity, approximate
+  static const uint8_t kColorFilterArrangement;
 
-    static const float kSaturationVoltage;
-    static const uint32_t kSaturationElectrons;
-    static const float kVoltsPerLuxSecond;
-    static const float kElectronsPerLuxSecond;
+  // Output image data characteristics
+  static const uint32_t kMaxRawValue;
+  static const uint32_t kBlackLevel;
+  // Sensor sensitivity, approximate
 
-    static const float kBaseGainFactor;
+  static const float kSaturationVoltage;
+  static const uint32_t kSaturationElectrons;
+  static const float kVoltsPerLuxSecond;
+  static const float kElectronsPerLuxSecond;
 
-    static const float kReadNoiseStddevBeforeGain; // In electrons
-    static const float kReadNoiseStddevAfterGain;  // In raw digital units
-    static const float kReadNoiseVarBeforeGain;
-    static const float kReadNoiseVarAfterGain;
+  static const float kBaseGainFactor;
 
-    // While each row has to read out, reset, and then expose, the (reset +
-    // expose) sequence can be overlapped by other row readouts, so the final
-    // minimum frame duration is purely a function of row readout time, at least
-    // if there's a reasonable number of rows.
-    const nsecs_t mRowReadoutTime;
+  static const float kReadNoiseStddevBeforeGain;  // In electrons
+  static const float kReadNoiseStddevAfterGain;   // In raw digital units
+  static const float kReadNoiseVarBeforeGain;
+  static const float kReadNoiseVarAfterGain;
 
-    static const int32_t kSensitivityRange[2];
-    static const uint32_t kDefaultSensitivity;
+  // While each row has to read out, reset, and then expose, the (reset +
+  // expose) sequence can be overlapped by other row readouts, so the final
+  // minimum frame duration is purely a function of row readout time, at least
+  // if there's a reasonable number of rows.
+  const nsecs_t mRowReadoutTime;
 
-  private:
-    Mutex mControlMutex; // Lock before accessing control parameters
-    // Start of control parameters
-    Condition mVSync;
-    bool      mGotVSync;
-    uint64_t  mExposureTime;
-    uint64_t  mFrameDuration;
-    uint32_t  mGainFactor;
-    Buffers  *mNextBuffers;
-    uint32_t  mFrameNumber;
+  static const int32_t kSensitivityRange[2];
+  static const uint32_t kDefaultSensitivity;
 
-    // End of control parameters
+ private:
+  Mutex mControlMutex;  // Lock before accessing control parameters
+  // Start of control parameters
+  Condition mVSync;
+  bool mGotVSync;
+  uint64_t mExposureTime;
+  uint64_t mFrameDuration;
+  uint32_t mGainFactor;
+  Buffers *mNextBuffers;
+  uint32_t mFrameNumber;
 
-    Mutex mReadoutMutex; // Lock before accessing readout variables
-    // Start of readout variables
-    Condition mReadoutAvailable;
-    Condition mReadoutComplete;
-    Buffers  *mCapturedBuffers;
-    nsecs_t   mCaptureTime;
-    SensorListener *mListener;
-    // End of readout variables
+  // End of control parameters
 
-    // Time of sensor startup, used for simulation zero-time point
-    nsecs_t mStartupTime;
+  Mutex mReadoutMutex;  // Lock before accessing readout variables
+  // Start of readout variables
+  Condition mReadoutAvailable;
+  Condition mReadoutComplete;
+  Buffers *mCapturedBuffers;
+  nsecs_t mCaptureTime;
+  SensorListener *mListener;
+  // End of readout variables
 
-    /**
-     * Inherited Thread virtual overrides, and members only used by the
-     * processing thread
-     */
-  private:
-    virtual status_t readyToRun();
+  // Time of sensor startup, used for simulation zero-time point
+  nsecs_t mStartupTime;
 
-    virtual bool threadLoop();
+  /**
+   * Inherited Thread virtual overrides, and members only used by the
+   * processing thread
+   */
+ private:
+  virtual status_t readyToRun();
 
-    nsecs_t mNextCaptureTime;
-    Buffers *mNextCapturedBuffers;
+  virtual bool threadLoop();
 
-    Scene mScene;
+  nsecs_t mNextCaptureTime;
+  Buffers *mNextCapturedBuffers;
 
-    void captureRaw(uint8_t *img, uint32_t gain, uint32_t stride);
-    void captureRGBA(uint8_t *img, uint32_t gain, uint32_t stride);
-    void captureRGB(uint8_t *img, uint32_t gain, uint32_t stride);
-    void captureNV21(uint8_t *img, uint32_t gain, uint32_t stride);
-    void captureDepth(uint8_t *img, uint32_t gain, uint32_t stride);
-    void captureDepthCloud(uint8_t *img);
+  Scene mScene;
 
+  void captureRaw(uint8_t *img, uint32_t gain, uint32_t stride);
+  void captureRGBA(uint8_t *img, uint32_t gain, uint32_t stride);
+  void captureRGB(uint8_t *img, uint32_t gain, uint32_t stride);
+  void captureNV21(uint8_t *img, uint32_t gain, uint32_t stride);
+  void captureDepth(uint8_t *img, uint32_t gain, uint32_t stride);
+  void captureDepthCloud(uint8_t *img);
 };
 
-}
+}  // namespace android
 
-#endif // HW_EMULATOR_CAMERA2_SENSOR_H
+#endif  // HW_EMULATOR_CAMERA2_SENSOR_H
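
The readout handshake reformatted in Sensor.cpp above (mReadoutAvailable,
mReadoutComplete, and the mCapturedBuffers pointer guarded by mReadoutMutex)
amounts to a one-slot producer/consumer mailbox: threadLoop blocks on
mReadoutComplete if the previous frame has not been consumed yet, and
waitForNewFrame blocks on mReadoutAvailable, with a timeout, until a frame is
published. A minimal standalone sketch of the same pattern using the C++
standard library instead of the Android utils classes (names here are
illustrative, not the HAL's API):

#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

// One-slot mailbox mirroring the mReadoutAvailable/mReadoutComplete pair.
struct FrameSlot {
  std::mutex m;
  std::condition_variable available;  // a frame has been published
  std::condition_variable consumed;   // the slot has been emptied
  int *frame = nullptr;               // stands in for mCapturedBuffers

  void publish(int *f) {
    std::unique_lock<std::mutex> lock(m);
    // Like threadLoop(): wait for the reader to catch up before overwriting.
    consumed.wait(lock, [this] { return frame == nullptr; });
    frame = f;
    available.notify_one();
  }

  bool waitForFrame(std::chrono::milliseconds reltime, int **out) {
    std::unique_lock<std::mutex> lock(m);
    // Like waitForNewFrame(): returns immediately if a frame is already
    // pending, false on timeout.
    if (!available.wait_for(lock, reltime, [this] { return frame != nullptr; }))
      return false;
    *out = frame;
    frame = nullptr;        // empty the slot...
    consumed.notify_one();  // ...and unblock the capture thread
    return true;
  }
};

int main() {
  FrameSlot slot;
  int frames[3] = {10, 11, 12};
  std::thread producer([&] {
    for (int &f : frames) slot.publish(&f);
  });
  int *got = nullptr;
  while (slot.waitForFrame(std::chrono::milliseconds(100), &got))
    std::printf("consumed frame %d\n", *got);
  producer.join();
  return 0;
}

As in the HAL code, only the consumer side has a timeout; the producer waits
indefinitely for the slot to drain, which is why the real threadLoop logs
"Waiting for readout thread to catch up!" when the consumer falls behind.
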