Migrate hardware/qcom/camera to device/huawei/angler/camera

    300b5fd QCamera2: HAL3: Enable realtime time stamp
    318a6ed mm_camera: Retry opening camera for ENODEV error am: 4cca666914 am: 5d1ee9e99c
    5d1ee9e mm_camera: Retry opening camera for ENODEV error am: 4cca666914
    4cca666 mm_camera: Retry opening camera for ENODEV error
    8583485 QCamera2: HAL3: replace abort with exit am: eeeffc0d6b am: 1cd2b06ca0
    1cd2b06 QCamera2: HAL3: replace abort with exit am: eeeffc0d6b
    eeeffc0 QCamera2: HAL3: replace abort with exit
    bfd70a8 QCamera2: HAL3: Restart daemon and mediaserver when buffer is lost. am: ecc5ece9ef am: fc3f16d449
    fc3f16d QCamera2: HAL3: Restart daemon and mediaserver when buffer is lost. am: ecc5ece9ef
    ecc5ece QCamera2: HAL3: Restart daemon and mediaserver when buffer is lost.
    fa73947 Disable clang compiler to unblock angler system build.
    04b65a2 Camera3: add dynamic whilte level support
    34cca27 Camera3: add dynamic black level support
    dc3db11 Workaround for adp8064
    3e0b0ac am bac3bdfb: am 2b922024: Merge "QCamera3: Fail unsupported stream configurations" into mnc-dr-dev
    bac3bdf am 2b922024: Merge "QCamera3: Fail unsupported stream configurations" into mnc-dr-dev
    2b92202 Merge "QCamera3: Fail unsupported stream configurations" into mnc-dr-dev
    01a698d am f90265de: am 27d6d35f: QCamera2/HAL3: Read v4l2_buffer error and notify accordingly
    f90265d am 27d6d35f: QCamera2/HAL3: Read v4l2_buffer error and notify accordingly
    0fe6077 QCamera3: Fail unsupported stream configurations
    27d6d35 QCamera2/HAL3: Read v4l2_buffer error and notify accordingly
    e7f64bd am 488104b2: am 3fdd6065: QCamera2/HAL3: Recover offline metabuffers on flush
    488104b am 3fdd6065: QCamera2/HAL3: Recover offline metabuffers on flush
    3fdd606 QCamera2/HAL3: Recover offline metabuffers on flush
    43fa335 am 6bb7fcd7: am 1bf1f3c7: Camera3: Send reprocess shutter notify and input buf after pproc
    3487b72 am 65858bf0: am a7586e9c: Camera3: Do not allow dropped metadata
    6bb7fcd am 1bf1f3c7: Camera3: Send reprocess shutter notify and input buf after pproc
    65858bf am a7586e9c: Camera3: Do not allow dropped metadata
    638509e am f563e374: am ab2f8e4b: Adding libgoog processing for raw images.
    f563e37 am ab2f8e4b: Adding libgoog processing for raw images.
    1bf1f3c Camera3: Send reprocess shutter notify and input buf after pproc
    a7586e9 Camera3: Do not allow dropped metadata
    ab2f8e4 Adding libgoog processing for raw images.
    60261d3 am db284b8e: am 915a9902: Merge "Camera3: Proper cleanup in putStreamBufs for YUV channel" into mnc-dr-dev
    288c014 am 096ba72a: am 40db3c9e: Merge "Camera3: Unmap buffers after stop reprocess stream" into mnc-dr-dev
    7561e37 am d98d10a7: am 73640de4: Camera3: Add support for dynamic EIS ON/OFF
    db284b8 am 915a9902: Merge "Camera3: Proper cleanup in putStreamBufs for YUV channel" into mnc-dr-dev
    096ba72 am 40db3c9e: Merge "Camera3: Unmap buffers after stop reprocess stream" into mnc-dr-dev
    d98d10a am 73640de4: Camera3: Add support for dynamic EIS ON/OFF
    915a990 Merge "Camera3: Proper cleanup in putStreamBufs for YUV channel" into mnc-dr-dev
    40db3c9 Merge "Camera3: Unmap buffers after stop reprocess stream" into mnc-dr-dev
    73640de Camera3: Add support for dynamic EIS ON/OFF
    a7c2b42 Camera3: Proper cleanup in putStreamBufs for YUV channel
    9b1814d Camera3: Unmap buffers after stop reprocess stream
    1e586e2 am aa0d1cb2: am e7c8e5de: Camera3: Decrease max_buffers hint for ISP for 4K recording
    bb91b3c am 626c2221: am d2e378de: Camera3: disable the image description exif field
    e40240e am 7bc704bf: am 248c5928: Camera3: Adding checks for stream handle
    aa0d1cb am e7c8e5de: Camera3: Decrease max_buffers hint for ISP for 4K recording
    e7c8e5d Camera3: Decrease max_buffers hint for ISP for 4K recording
    626c222 am d2e378de: Camera3: disable the image description exif field

Change-Id: I9ab459884c89c7642983b98e579d95f50b52b492
Signed-off-by: Ed Tam <etam@google.com>
diff --git a/camera/Android.mk b/camera/Android.mk
new file mode 100644
index 0000000..e963c1a
--- /dev/null
+++ b/camera/Android.mk
@@ -0,0 +1,10 @@
+# TODO:  Find a better way to separate build configs for ADP vs non-ADP devices
+ifneq ($(TARGET_BOARD_AUTO),true)
+  ifneq ($(filter msm8992 msm8994,$(TARGET_BOARD_PLATFORM)),)
+    ifneq ($(strip $(USE_CAMERA_STUB)),true)
+      ifneq ($(BUILD_TINY_ANDROID),true)
+        include $(call all-subdir-makefiles)
+      endif
+    endif
+  endif
+endif
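+
+# Illustrative outcome of the guards above: only when TARGET_BOARD_AUTO is not
+# "true", TARGET_BOARD_PLATFORM is msm8992 or msm8994, USE_CAMERA_STUB is not
+# "true" and BUILD_TINY_ANDROID is not "true" are the sub-directory makefiles
+# included; any other combination skips the camera HAL build entirely.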
diff --git a/camera/CleanSpec.mk b/camera/CleanSpec.mk
new file mode 100644
index 0000000..bb86ad0
--- /dev/null
+++ b/camera/CleanSpec.mk
@@ -0,0 +1,47 @@
+# Copyright (C) 2007 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# If you don't need to do a full clean build but would like to touch
+# a file or delete some intermediate files, add a clean step to the end
+# of the list.  These steps will only be run once, if they haven't been
+# run before.
+#
+# E.g.:
+#     $(call add-clean-step, touch -c external/sqlite/sqlite3.h)
+#     $(call add-clean-step, rm -rf $(PRODUCT_OUT)/obj/STATIC_LIBRARIES/libz_intermediates)
+#
+# Always use "touch -c" and "rm -f" or "rm -rf" to gracefully deal with
+# files that are missing or have been moved.
+#
+# Use $(PRODUCT_OUT) to get to the "out/target/product/blah/" directory.
+# Use $(OUT_DIR) to refer to the "out" directory.
+#
+# If you need to re-do something that's already mentioned, just copy
+# the command and add it to the bottom of the list.  E.g., if a change
+# that you made last week required touching a file and a change you
+# made today requires touching the same file, just copy the old
+# touch step and add it to the end of the list.
+#
+# ************************************************
+# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
+# ************************************************
+
+# For example:
+#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/APPS/AndroidTests_intermediates)
+#$(call add-clean-step, rm -rf $(OUT_DIR)/target/common/obj/JAVA_LIBRARIES/core_intermediates)
+#$(call add-clean-step, find $(OUT_DIR) -type f -name "IGTalkSession*" -print0 | xargs -0 rm -f)
+#$(call add-clean-step, rm -rf $(PRODUCT_OUT)/data/*)
+
+$(call add-clean-step, find $(OUT_DIR) -name "camera.msm8960*" -print0 | xargs -0 rm -rf)
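+
+# A later change could append another step here in the same way (illustrative
+# only; the module name below is hypothetical):
+#$(call add-clean-step, find $(OUT_DIR) -name "camera.msm8994*" -print0 | xargs -0 rm -rf)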
diff --git a/camera/MODULE_LICENSE_BSD b/camera/MODULE_LICENSE_BSD
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/camera/MODULE_LICENSE_BSD
diff --git a/camera/QCamera2/Android.mk b/camera/QCamera2/Android.mk
new file mode 100644
index 0000000..fbc493c
--- /dev/null
+++ b/camera/QCamera2/Android.mk
@@ -0,0 +1,90 @@
+ifneq (,$(filter $(TARGET_ARCH), arm arm64))
+
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+# Too many clang warnings/errors, see b/23163853.
+LOCAL_CLANG := false
+
+LOCAL_SRC_FILES := \
+        util/QCameraCmdThread.cpp \
+        util/QCameraQueue.cpp \
+        util/QCameraFlash.cpp \
+        util/QCameraPerf.cpp \
+        QCamera2Hal.cpp \
+        QCamera2Factory.cpp
+
+#HAL 3.0 source
+LOCAL_SRC_FILES += \
+        HAL3/QCamera3HWI.cpp \
+        HAL3/QCamera3Mem.cpp \
+        HAL3/QCamera3Stream.cpp \
+        HAL3/QCamera3Channel.cpp \
+        HAL3/QCamera3VendorTags.cpp \
+        HAL3/QCamera3PostProc.cpp \
+        HAL3/QCamera3CropRegionMapper.cpp \
+        HAL3/QCamera3StreamMem.cpp
+
+#HAL 1.0 source
+LOCAL_SRC_FILES += \
+        HAL/QCamera2HWI.cpp \
+        HAL/QCameraMem.cpp \
+        HAL/QCameraStateMachine.cpp \
+        HAL/QCameraChannel.cpp \
+        HAL/QCameraStream.cpp \
+        HAL/QCameraPostProc.cpp \
+        HAL/QCamera2HWICallbacks.cpp \
+        HAL/QCameraParameters.cpp \
+        HAL/QCameraThermalAdapter.cpp
+
+LOCAL_CFLAGS := -Wall -Wextra -Werror
+LOCAL_CFLAGS += -DHAS_MULTIMEDIA_HINTS
+LOCAL_CFLAGS += -DENABLE_MODEL_INFO_EXIF
+
+ifeq ($(TARGET_USES_AOSP),true)
+LOCAL_CFLAGS += -DVANILLA_HAL
+endif
+
+#HAL 1.0 Flags
+LOCAL_CFLAGS += -DDEFAULT_DENOISE_MODE_ON -DHAL3
+
+LOCAL_C_INCLUDES := \
+        $(LOCAL_PATH)/stack/common \
+        frameworks/native/include/media/hardware \
+        frameworks/native/include/media/openmax \
+        hardware/qcom/media/libstagefrighthw \
+        system/media/camera/include \
+        $(LOCAL_PATH)/../mm-image-codec/qexif \
+        $(LOCAL_PATH)/../mm-image-codec/qomx_core \
+        $(LOCAL_PATH)/util \
+
+#HAL 1.0 Include paths
+LOCAL_C_INCLUDES += \
+        frameworks/native/include/media/hardware \
+        hardware/qcom/camera/QCamera2/HAL
+
+LOCAL_C_INCLUDES += $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr/include
+
+#LOCAL_STATIC_LIBRARIES := libqcamera2_util
+LOCAL_C_INCLUDES += \
+        $(TARGET_OUT_HEADERS)/qcom/display
+
+ifeq ($(call is-board-platform-in-list, msm8992),true)
+LOCAL_CFLAGS += -DOPTIMIZE_BUF_COUNT
+endif
+
+LOCAL_SHARED_LIBRARIES := libcamera_client liblog libhardware libutils libcutils libdl libsync
+LOCAL_SHARED_LIBRARIES += libmmcamera_interface libmmjpeg_interface libui libcamera_metadata
+LOCAL_SHARED_LIBRARIES += libqdMetaData
+
+LOCAL_MODULE_RELATIVE_PATH := hw
+LOCAL_MODULE := camera.$(TARGET_BOARD_PLATFORM)
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_32_BIT_ONLY := $(BOARD_QTI_CAMERA_32BIT_ONLY)
+include $(BUILD_SHARED_LIBRARY)
+
+include $(call first-makefiles-under,$(LOCAL_PATH))
+
+endif
diff --git a/camera/QCamera2/HAL/QCamera2HWI.cpp b/camera/QCamera2/HAL/QCamera2HWI.cpp
new file mode 100644
index 0000000..104412c
--- /dev/null
+++ b/camera/QCamera2/HAL/QCamera2HWI.cpp
@@ -0,0 +1,7767 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#define LOG_TAG "QCamera2HWI"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+
+#include <utils/Log.h>
+#include <cutils/properties.h>
+#include <hardware/camera.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <utils/Errors.h>
+#include <utils/Trace.h>
+#include <gralloc_priv.h>
+#include <gui/Surface.h>
+
+#include "QCamera2HWI.h"
+#include "QCameraMem.h"
+
+#define MAP_TO_DRIVER_COORDINATE(val, base, scale, offset) \
+  ((int32_t)val * (int32_t)scale / (int32_t)base + (int32_t)offset)
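+// MAP_TO_DRIVER_COORDINATE example, with illustrative values only:
+// MAP_TO_DRIVER_COORDINATE(500, 2000, 4096, 0) == 500 * 4096 / 2000 + 0 == 1024,
+// i.e. an offset of 500 in a base range of 2000 maps onto a 4096-wide driver range.
+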
+#define CAMERA_MIN_STREAMING_BUFFERS     3
+#define EXTRA_ZSL_PREVIEW_STREAM_BUF     2
+#define CAMERA_MIN_JPEG_ENCODING_BUFFERS 2
+#define CAMERA_MIN_VIDEO_BUFFERS         9
+#define CAMERA_LONGSHOT_STAGES           4
+#define CAMERA_MIN_VIDEO_BATCH_BUFFERS   6
+
+//This multiplier signifies extra buffers that we need to allocate
+//for the output of pproc
+#define CAMERA_PPROC_OUT_BUFFER_MULTIPLIER 2
+
+
+#define HDR_CONFIDENCE_THRESHOLD 0.4
+
+namespace qcamera {
+
+cam_capability_t *gCamCaps[MM_CAMERA_MAX_NUM_SENSORS];
+static pthread_mutex_t g_camlock = PTHREAD_MUTEX_INITIALIZER;
+volatile uint32_t gCamHalLogLevel = 1;
+
+camera_device_ops_t QCamera2HardwareInterface::mCameraOps = {
+    .set_preview_window =         QCamera2HardwareInterface::set_preview_window,
+    .set_callbacks =              QCamera2HardwareInterface::set_CallBacks,
+    .enable_msg_type =            QCamera2HardwareInterface::enable_msg_type,
+    .disable_msg_type =           QCamera2HardwareInterface::disable_msg_type,
+    .msg_type_enabled =           QCamera2HardwareInterface::msg_type_enabled,
+
+    .start_preview =              QCamera2HardwareInterface::start_preview,
+    .stop_preview =               QCamera2HardwareInterface::stop_preview,
+    .preview_enabled =            QCamera2HardwareInterface::preview_enabled,
+    .store_meta_data_in_buffers = QCamera2HardwareInterface::store_meta_data_in_buffers,
+
+    .start_recording =            QCamera2HardwareInterface::start_recording,
+    .stop_recording =             QCamera2HardwareInterface::stop_recording,
+    .recording_enabled =          QCamera2HardwareInterface::recording_enabled,
+    .release_recording_frame =    QCamera2HardwareInterface::release_recording_frame,
+
+    .auto_focus =                 QCamera2HardwareInterface::auto_focus,
+    .cancel_auto_focus =          QCamera2HardwareInterface::cancel_auto_focus,
+
+    .take_picture =               QCamera2HardwareInterface::take_picture,
+    .cancel_picture =             QCamera2HardwareInterface::cancel_picture,
+
+    .set_parameters =             QCamera2HardwareInterface::set_parameters,
+    .get_parameters =             QCamera2HardwareInterface::get_parameters,
+    .put_parameters =             QCamera2HardwareInterface::put_parameters,
+    .send_command =               QCamera2HardwareInterface::send_command,
+
+    .release =                    QCamera2HardwareInterface::release,
+    .dump =                       QCamera2HardwareInterface::dump,
+};
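+
+/* The camera service drives this HAL1 device through the ops table above:
+ * a framework call such as device->ops->start_preview(device) lands in the
+ * corresponding static wrapper, which recovers the QCamera2HardwareInterface
+ * instance from device->priv and forwards the request to the state machine. */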
+
+/*===========================================================================
+ * FUNCTION   : set_preview_window
+ *
+ * DESCRIPTION: set preview window.
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *   @window  : window ops table
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::set_preview_window(struct camera_device *device,
+        struct preview_stream_ops *window)
+{
+    ATRACE_CALL();
+    int rc = NO_ERROR;
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("%s: NULL camera device", __func__);
+        return BAD_VALUE;
+    }
+
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    rc = hw->processAPI(QCAMERA_SM_EVT_SET_PREVIEW_WINDOW, (void *)window);
+    if (rc == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_SET_PREVIEW_WINDOW, &apiResult);
+        rc = apiResult.status;
+    }
+    hw->unlockAPI();
+
+    return rc;
+}
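+
+/* All wrappers registered in mCameraOps follow the pattern shown above:
+ * lockAPI(), post an event via processAPI(), block in waitAPIResult() until
+ * the state machine reports a result, then unlockAPI(). Only the event id,
+ * the payload and the field read from the result differ between entry points. */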
+
+/*===========================================================================
+ * FUNCTION   : set_CallBacks
+ *
+ * DESCRIPTION: set callbacks for notify and data
+ *
+ * PARAMETERS :
+ *   @device     : ptr to camera device struct
+ *   @notify_cb  : notify cb
+ *   @data_cb    : data cb
+ *   @data_cb_timestamp  : video data cb with timestamp
+ *   @get_memory : ops table for requesting gralloc memory
+ *   @user       : user data ptr
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::set_CallBacks(struct camera_device *device,
+        camera_notify_callback notify_cb,
+        camera_data_callback data_cb,
+        camera_data_timestamp_callback data_cb_timestamp,
+        camera_request_memory get_memory,
+        void *user)
+{
+    ATRACE_CALL();
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return;
+    }
+
+    qcamera_sm_evt_setcb_payload_t payload;
+    payload.notify_cb = notify_cb;
+    payload.data_cb = data_cb;
+    payload.data_cb_timestamp = data_cb_timestamp;
+    payload.get_memory = get_memory;
+    payload.user = user;
+
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    int32_t rc = hw->processAPI(QCAMERA_SM_EVT_SET_CALLBACKS, (void *)&payload);
+    if (rc == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_SET_CALLBACKS, &apiResult);
+    }
+    hw->unlockAPI();
+}
+
+/*===========================================================================
+ * FUNCTION   : enable_msg_type
+ *
+ * DESCRIPTION: enable certain msg type
+ *
+ * PARAMETERS :
+ *   @device     : ptr to camera device struct
+ *   @msg_type   : msg type mask
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::enable_msg_type(struct camera_device *device, int32_t msg_type)
+{
+    ATRACE_CALL();
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return;
+    }
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    int32_t rc = hw->processAPI(QCAMERA_SM_EVT_ENABLE_MSG_TYPE, (void *)&msg_type);
+    if (rc == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_ENABLE_MSG_TYPE, &apiResult);
+    }
+    hw->unlockAPI();
+}
+
+/*===========================================================================
+ * FUNCTION   : disable_msg_type
+ *
+ * DESCRIPTION: disable certain msg type
+ *
+ * PARAMETERS :
+ *   @device     : ptr to camera device struct
+ *   @msg_type   : msg type mask
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::disable_msg_type(struct camera_device *device, int32_t msg_type)
+{
+    ATRACE_CALL();
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return;
+    }
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    int32_t rc = hw->processAPI(QCAMERA_SM_EVT_DISABLE_MSG_TYPE, (void *)&msg_type);
+    if (rc == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_DISABLE_MSG_TYPE, &apiResult);
+    }
+    hw->unlockAPI();
+}
+
+/*===========================================================================
+ * FUNCTION   : msg_type_enabled
+ *
+ * DESCRIPTION: query whether a certain msg type is enabled
+ *
+ * PARAMETERS :
+ *   @device     : ptr to camera device struct
+ *   @msg_type   : msg type mask
+ *
+ * RETURN     : 1 -- enabled
+ *              0 -- not enabled
+ *==========================================================================*/
+int QCamera2HardwareInterface::msg_type_enabled(struct camera_device *device, int32_t msg_type)
+{
+    ATRACE_CALL();
+    int ret = NO_ERROR;
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return BAD_VALUE;
+    }
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    ret = hw->processAPI(QCAMERA_SM_EVT_MSG_TYPE_ENABLED, (void *)&msg_type);
+    if (ret == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_MSG_TYPE_ENABLED, &apiResult);
+        ret = apiResult.enabled;
+    }
+    hw->unlockAPI();
+
+   return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : start_preview
+ *
+ * DESCRIPTION: start preview
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::start_preview(struct camera_device *device)
+{
+    ATRACE_CALL();
+    int ret = NO_ERROR;
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return BAD_VALUE;
+    }
+    ALOGI("[KPI Perf] %s: E PROFILE_START_PREVIEW", __func__);
+    hw->m_perfLock.lock_acq();
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    qcamera_sm_evt_enum_t evt = QCAMERA_SM_EVT_START_PREVIEW;
+    if (hw->isNoDisplayMode()) {
+        evt = QCAMERA_SM_EVT_START_NODISPLAY_PREVIEW;
+    }
+    ret = hw->processAPI(evt, NULL);
+    if (ret == NO_ERROR) {
+        hw->waitAPIResult(evt, &apiResult);
+        ret = apiResult.status;
+    }
+    hw->unlockAPI();
+    hw->m_bPreviewStarted = true;
+    ALOGI("[KPI Perf] %s: X", __func__);
+    hw->m_perfLock.lock_rel();
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : stop_preview
+ *
+ * DESCRIPTION: stop preview
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::stop_preview(struct camera_device *device)
+{
+    ATRACE_CALL();
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return;
+    }
+    ALOGI("[KPI Perf] %s: E PROFILE_STOP_PREVIEW", __func__);
+    hw->m_perfLock.lock_acq();
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    int32_t ret = hw->processAPI(QCAMERA_SM_EVT_STOP_PREVIEW, NULL);
+    if (ret == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_STOP_PREVIEW, &apiResult);
+    }
+    hw->unlockAPI();
+    hw->m_perfLock.lock_rel();
+    ALOGI("[KPI Perf] %s: X", __func__);
+}
+
+/*===========================================================================
+ * FUNCTION   : preview_enabled
+ *
+ * DESCRIPTION: query whether preview is running
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *
+ * RETURN     : 1 -- running
+ *              0 -- not running
+ *==========================================================================*/
+int QCamera2HardwareInterface::preview_enabled(struct camera_device *device)
+{
+    ATRACE_CALL();
+    int ret = NO_ERROR;
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return BAD_VALUE;
+    }
+
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    ret = hw->processAPI(QCAMERA_SM_EVT_PREVIEW_ENABLED, NULL);
+    if (ret == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_PREVIEW_ENABLED, &apiResult);
+        ret = apiResult.enabled;
+    }
+    hw->unlockAPI();
+
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : store_meta_data_in_buffers
+ *
+ * DESCRIPTION: set whether meta data should be stored in buffers for video frames
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *   @enable  : flag if enable
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::store_meta_data_in_buffers(
+                struct camera_device *device, int enable)
+{
+    int ret = NO_ERROR;
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return BAD_VALUE;
+    }
+
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    ret = hw->processAPI(QCAMERA_SM_EVT_STORE_METADATA_IN_BUFS, (void *)&enable);
+    if (ret == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_STORE_METADATA_IN_BUFS, &apiResult);
+        ret = apiResult.status;
+    }
+    hw->unlockAPI();
+
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : start_recording
+ *
+ * DESCRIPTION: start recording
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::start_recording(struct camera_device *device)
+{
+    ATRACE_CALL();
+    int ret = NO_ERROR;
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return BAD_VALUE;
+    }
+    ALOGI("[KPI Perf] %s: E PROFILE_START_RECORDING", __func__);
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    ret = hw->processAPI(QCAMERA_SM_EVT_START_RECORDING, NULL);
+    if (ret == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_START_RECORDING, &apiResult);
+        ret = apiResult.status;
+    }
+    hw->unlockAPI();
+    hw->m_bRecordStarted = true;
+    ALOGI("[KPI Perf] %s: X", __func__);
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : stop_recording
+ *
+ * DESCRIPTION: stop recording
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::stop_recording(struct camera_device *device)
+{
+    ATRACE_CALL();
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return;
+    }
+    ALOGI("[KPI Perf] %s: E PROFILE_STOP_RECORDING", __func__);
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    int32_t ret = hw->processAPI(QCAMERA_SM_EVT_STOP_RECORDING, NULL);
+    if (ret == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_STOP_RECORDING, &apiResult);
+    }
+    hw->unlockAPI();
+    ALOGI("[KPI Perf] %s: X", __func__);
+}
+
+/*===========================================================================
+ * FUNCTION   : recording_enabled
+ *
+ * DESCRIPTION: query whether recording is running
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *
+ * RETURN     : 1 -- running
+ *              0 -- not running
+ *==========================================================================*/
+int QCamera2HardwareInterface::recording_enabled(struct camera_device *device)
+{
+    ATRACE_CALL();
+    int ret = NO_ERROR;
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return BAD_VALUE;
+    }
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    ret = hw->processAPI(QCAMERA_SM_EVT_RECORDING_ENABLED, NULL);
+    if (ret == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_RECORDING_ENABLED, &apiResult);
+        ret = apiResult.enabled;
+    }
+    hw->unlockAPI();
+
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : release_recording_frame
+ *
+ * DESCRIPTION: return a recording frame back to the HAL
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *   @opaque  : ptr to frame to be returned
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::release_recording_frame(
+            struct camera_device *device, const void *opaque)
+{
+    ATRACE_CALL();
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return;
+    }
+    if (!opaque) {
+        ALOGE("%s: Error!! Frame info is NULL", __func__);
+        return;
+    }
+    CDBG_HIGH("%s: E", __func__);
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    int32_t ret = hw->processAPI(QCAMERA_SM_EVT_RELEASE_RECORIDNG_FRAME, (void *)opaque);
+    if (ret == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_RELEASE_RECORIDNG_FRAME, &apiResult);
+    }
+    hw->unlockAPI();
+    CDBG_HIGH("%s: X", __func__);
+}
+
+/*===========================================================================
+ * FUNCTION   : auto_focus
+ *
+ * DESCRIPTION: start auto focus
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::auto_focus(struct camera_device *device)
+{
+    ATRACE_INT("Camera:AutoFocus", 1);
+    int ret = NO_ERROR;
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return BAD_VALUE;
+    }
+    CDBG_HIGH("[KPI Perf] %s : E PROFILE_AUTO_FOCUS", __func__);
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    ret = hw->processAPI(QCAMERA_SM_EVT_START_AUTO_FOCUS, NULL);
+    if (ret == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_START_AUTO_FOCUS, &apiResult);
+        ret = apiResult.status;
+    }
+    hw->unlockAPI();
+    CDBG_HIGH("[KPI Perf] %s : X", __func__);
+
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : cancel_auto_focus
+ *
+ * DESCRIPTION: cancel auto focus
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::cancel_auto_focus(struct camera_device *device)
+{
+    ATRACE_CALL();
+    int ret = NO_ERROR;
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return BAD_VALUE;
+    }
+    ALOGE("[KPI Perf] %s : E PROFILE_CANCEL_AUTO_FOCUS", __func__);
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    ret = hw->processAPI(QCAMERA_SM_EVT_STOP_AUTO_FOCUS, NULL);
+    if (ret == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_STOP_AUTO_FOCUS, &apiResult);
+        ret = apiResult.status;
+    }
+    hw->unlockAPI();
+    CDBG_HIGH("[KPI Perf] %s : X", __func__);
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : take_picture
+ *
+ * DESCRIPTION: take picture
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::take_picture(struct camera_device *device)
+{
+    ATRACE_CALL();
+    int ret = NO_ERROR;
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return BAD_VALUE;
+    }
+    ALOGI("[KPI Perf] %s: E PROFILE_TAKE_PICTURE", __func__);
+    hw->lockAPI();
+    if (!hw->mLongshotEnabled) {
+        hw->m_perfLock.lock_acq();
+    }
+    qcamera_api_result_t apiResult;
+
+    /** Added support for Retro-active Frames:
+     *  takePicture() is called before preparing the snapshot so that the
+     *  mm-camera channel picks up legacy (retro) frames even
+     *  before LED estimation is triggered.
+     */
+
+    CDBG_HIGH("%s: [ZSL Retro]: numRetroSnap %d, isLiveSnap %d, isZSL %d, isHDR %d",
+       __func__, hw->mParameters.getNumOfRetroSnapshots(),
+       hw->isLiveSnapshot(), hw->isZSLMode(), hw->isHDRMode());
+
+    // Check for Retro-active Frames
+    if ((hw->mParameters.getNumOfRetroSnapshots() > 0) &&
+        !hw->isLiveSnapshot() && hw->isZSLMode() &&
+        !hw->isHDRMode() && !hw->isLongshotEnabled()) {
+        // Set Retro Picture Mode
+        hw->setRetroPicture(1);
+        hw->m_bLedAfAecLock = 0;
+        CDBG_HIGH("%s: [ZSL Retro] mode", __func__);
+
+        /* Call take Picture for total number of snapshots required.
+             This includes the number of retro frames and normal frames */
+        ret = hw->processAPI(QCAMERA_SM_EVT_TAKE_PICTURE, NULL);
+        if (ret == NO_ERROR) {
+          // Wait for retro frames, before calling prepare snapshot
+          CDBG_HIGH("%s:[ZSL Retro] Wait for Retro frames to be done", __func__);
+          hw->waitAPIResult(QCAMERA_SM_EVT_TAKE_PICTURE, &apiResult);
+            ret = apiResult.status;
+        }
+
+
+        // Start Preparing for normal Frames
+        CDBG_HIGH("%s: [ZSL Retro]  Start Prepare Snapshot", __func__);
+        /* Prepare snapshot in case LED needs to be flashed */
+        ret = hw->processAPI(QCAMERA_SM_EVT_PREPARE_SNAPSHOT, NULL);
+        if (ret == NO_ERROR) {
+            hw->waitAPIResult(QCAMERA_SM_EVT_PREPARE_SNAPSHOT, &apiResult);
+            ret = apiResult.status;
+            CDBG_HIGH("%s: [ZSL Retro] Prep Snapshot done", __func__);
+        }
+        hw->mPrepSnapRun = true;
+    }
+    else {
+        hw->setRetroPicture(0);
+        CDBG_HIGH("%s: [ZSL Retro] Normal Pic Taking Mode", __func__);
+
+        CDBG_HIGH("%s: [ZSL Retro] Start Prepare Snapshot", __func__);
+        /* Prepare snapshot in case LED needs to be flashed */
+        if (hw->mFlashNeeded == 1 || hw->mParameters.isChromaFlashEnabled()) {
+            // Start Preparing for normal Frames
+            CDBG_HIGH("%s: [ZSL Retro]  Start Prepare Snapshot", __func__);
+            /* Prepare snapshot in case LED needs to be flashed */
+            ret = hw->processAPI(QCAMERA_SM_EVT_PREPARE_SNAPSHOT, NULL);
+            if (ret == NO_ERROR) {
+              hw->waitAPIResult(QCAMERA_SM_EVT_PREPARE_SNAPSHOT, &apiResult);
+                ret = apiResult.status;
+                CDBG_HIGH("%s: [ZSL Retro] Prep Snapshot done", __func__);
+
+            }
+            hw->mPrepSnapRun = true;
+        }
+        /* Regardless what the result value for prepare_snapshot,
+         * go ahead with capture anyway. Just like the way autofocus
+         * is handled in capture case. */
+        /* capture */
+        CDBG_HIGH("%s: [ZSL Retro] Capturing normal frames", __func__);
+        ret = hw->processAPI(QCAMERA_SM_EVT_TAKE_PICTURE, NULL);
+        if (ret == NO_ERROR) {
+          hw->waitAPIResult(QCAMERA_SM_EVT_TAKE_PICTURE, &apiResult);
+            ret = apiResult.status;
+        }
+    }
+    hw->unlockAPI();
+    ALOGI("[KPI Perf] %s: X", __func__);
+    return ret;
+}
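+
+/* Summary of the two take_picture() paths above: in ZSL retro mode the capture
+ * event is posted first so that retro frames are collected, and prepare
+ * snapshot runs afterwards; in the normal path prepare snapshot (LED/flash
+ * estimation) runs first when flash is needed, and the capture event is posted
+ * regardless of its result. */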
+
+/*===========================================================================
+ * FUNCTION   : cancel_picture
+ *
+ * DESCRIPTION: cancel current take picture request
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::cancel_picture(struct camera_device *device)
+{
+    ATRACE_CALL();
+    int ret = NO_ERROR;
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return BAD_VALUE;
+    }
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    ret = hw->processAPI(QCAMERA_SM_EVT_CANCEL_PICTURE, NULL);
+    if (ret == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_CANCEL_PICTURE, &apiResult);
+        ret = apiResult.status;
+    }
+    hw->unlockAPI();
+
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : set_parameters
+ *
+ * DESCRIPTION: set camera parameters
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *   @parms   : string of packed parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::set_parameters(struct camera_device *device,
+                                              const char *parms)
+{
+    ATRACE_CALL();
+    int ret = NO_ERROR;
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return BAD_VALUE;
+    }
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    ret = hw->processAPI(QCAMERA_SM_EVT_SET_PARAMS, (void *)parms);
+    if (ret == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_SET_PARAMS, &apiResult);
+        ret = apiResult.status;
+    }
+    hw->unlockAPI();
+
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : get_parameters
+ *
+ * DESCRIPTION: query camera parameters
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *
+ * RETURN     : packed parameters in a string
+ *==========================================================================*/
+char* QCamera2HardwareInterface::get_parameters(struct camera_device *device)
+{
+    ATRACE_CALL();
+    char *ret = NULL;
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return NULL;
+    }
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    int32_t rc = hw->processAPI(QCAMERA_SM_EVT_GET_PARAMS, NULL);
+    if (rc == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_GET_PARAMS, &apiResult);
+        ret = apiResult.params;
+    }
+    hw->unlockAPI();
+
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : put_parameters
+ *
+ * DESCRIPTION: return camera parameters string back to HAL
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *   @parm    : ptr to parameter string to be returned
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::put_parameters(struct camera_device *device,
+                                               char *parm)
+{
+    ATRACE_CALL();
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return;
+    }
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    int32_t ret = hw->processAPI(QCAMERA_SM_EVT_PUT_PARAMS, (void *)parm);
+    if (ret == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_PUT_PARAMS, &apiResult);
+    }
+    hw->unlockAPI();
+}
+
+/*===========================================================================
+ * FUNCTION   : send_command
+ *
+ * DESCRIPTION: command to be executed
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *   @cmd     : cmd to be executed
+ *   @arg1    : ptr to optional argument1
+ *   @arg2    : ptr to optional argument2
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::send_command(struct camera_device *device,
+                                            int32_t cmd,
+                                            int32_t arg1,
+                                            int32_t arg2)
+{
+    ATRACE_CALL();
+    int ret = NO_ERROR;
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return BAD_VALUE;
+    }
+
+    qcamera_sm_evt_command_payload_t payload;
+    memset(&payload, 0, sizeof(qcamera_sm_evt_command_payload_t));
+    payload.cmd = cmd;
+    payload.arg1 = arg1;
+    payload.arg2 = arg2;
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    ret = hw->processAPI(QCAMERA_SM_EVT_SEND_COMMAND, (void *)&payload);
+    if (ret == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_SEND_COMMAND, &apiResult);
+        ret = apiResult.status;
+    }
+    hw->unlockAPI();
+
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : release
+ *
+ * DESCRIPTION: release camera resource
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::release(struct camera_device *device)
+{
+    ATRACE_CALL();
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return;
+    }
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    int32_t ret = hw->processAPI(QCAMERA_SM_EVT_RELEASE, NULL);
+    if (ret == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_RELEASE, &apiResult);
+    }
+    hw->unlockAPI();
+}
+
+/*===========================================================================
+ * FUNCTION   : dump
+ *
+ * DESCRIPTION: dump camera status
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *   @fd      : fd for status to be dumped to
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::dump(struct camera_device *device, int fd)
+{
+    int ret = NO_ERROR;
+
+    // Log level property is read when "adb shell dumpsys media.camera" is
+    // called so that the log level can be controlled without restarting
+    // the media server
+    getLogLevel();
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return BAD_VALUE;
+    }
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    ret = hw->processAPI(QCAMERA_SM_EVT_DUMP, (void *)&fd);
+    if (ret == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_DUMP, &apiResult);
+        ret = apiResult.status;
+    }
+    hw->unlockAPI();
+
+    return ret;
+}
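+
+/* Example of how this hooks up in practice (the property name is an assumption
+ * here; getLogLevel() holds the authoritative key):
+ *     adb shell setprop persist.camera.hal.debug 4
+ *     adb shell dumpsys media.camera
+ * The second command triggers dump(), which re-reads the log level property
+ * without restarting the media server. */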
+
+/*===========================================================================
+ * FUNCTION   : close_camera_device
+ *
+ * DESCRIPTION: close camera device
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::close_camera_device(hw_device_t *hw_dev)
+{
+    ATRACE_CALL();
+    int ret = NO_ERROR;
+    ALOGI("[KPI Perf] %s: E",__func__);
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(
+            reinterpret_cast<camera_device_t *>(hw_dev)->priv);
+    if (!hw) {
+        ALOGE("%s: NULL camera device", __func__);
+        return BAD_VALUE;
+    }
+    delete hw;
+    ALOGI("[KPI Perf] %s: X",__func__);
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : register_face_image
+ *
+ * DESCRIPTION: register a face image into the imaging lib for face
+ *              authentication / face recognition
+ *
+ * PARAMETERS :
+ *   @device  : ptr to camera device struct
+ *   @img_ptr : ptr to image buffer
+ *   @config  : ptr to config about the input image, i.e., format, dimension, etc.
+ *
+ * RETURN     : >=0 unique ID of the face registered.
+ *              <0  failure.
+ *==========================================================================*/
+int QCamera2HardwareInterface::register_face_image(struct camera_device *device,
+                                                   void *img_ptr,
+                                                   cam_pp_offline_src_config_t *config)
+{
+    ATRACE_CALL();
+    int ret = NO_ERROR;
+    QCamera2HardwareInterface *hw =
+        reinterpret_cast<QCamera2HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return BAD_VALUE;
+    }
+    qcamera_sm_evt_reg_face_payload_t payload;
+    memset(&payload, 0, sizeof(qcamera_sm_evt_reg_face_payload_t));
+    payload.img_ptr = img_ptr;
+    payload.config = config;
+    hw->lockAPI();
+    qcamera_api_result_t apiResult;
+    ret = hw->processAPI(QCAMERA_SM_EVT_REG_FACE_IMAGE, (void *)&payload);
+    if (ret == NO_ERROR) {
+        hw->waitAPIResult(QCAMERA_SM_EVT_REG_FACE_IMAGE, &apiResult);
+        ret = apiResult.handle;
+    }
+    hw->unlockAPI();
+
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : QCamera2HardwareInterface
+ *
+ * DESCRIPTION: constructor of QCamera2HardwareInterface
+ *
+ * PARAMETERS :
+ *   @cameraId  : camera ID
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCamera2HardwareInterface::QCamera2HardwareInterface(uint32_t cameraId)
+    : mCameraId(cameraId),
+      mCameraHandle(NULL),
+      mCameraOpened(false),
+      mPreviewWindow(NULL),
+      mMsgEnabled(0),
+      mStoreMetaDataInFrame(0),
+      m_stateMachine(this),
+      m_smThreadActive(true),
+      m_postprocessor(this),
+      m_thermalAdapter(QCameraThermalAdapter::getInstance()),
+      m_cbNotifier(this),
+      m_bPreviewStarted(false),
+      m_bRecordStarted(false),
+      m_currentFocusState(CAM_AF_SCANNING),
+      mDumpFrmCnt(0U),
+      mDumpSkipCnt(0U),
+      mThermalLevel(QCAMERA_THERMAL_NO_ADJUSTMENT),
+      mCancelAutoFocus(false),
+      m_HDRSceneEnabled(false),
+      mLongshotEnabled(false),
+      m_max_pic_width(0),
+      m_max_pic_height(0),
+      mLiveSnapshotThread(0),
+      mIntPicThread(0),
+      mFlashNeeded(false),
+      mDeviceRotation(0U),
+      mCaptureRotation(0U),
+      mJpegExifRotation(0U),
+      mUseJpegExifRotation(false),
+      mIs3ALocked(false),
+      mPrepSnapRun(false),
+      mZoomLevel(0),
+      mVFrameCount(0),
+      mVLastFrameCount(0),
+      mVLastFpsTime(0),
+      mVFps(0),
+      mPFrameCount(0),
+      mPLastFrameCount(0),
+      mPLastFpsTime(0),
+      mPFps(0),
+      m_bIntJpegEvtPending(false),
+      m_bIntRawEvtPending(false),
+      mSnapshotJob(-1),
+      mPostviewJob(-1),
+      mMetadataJob(-1),
+      mReprocJob(-1),
+      mRawdataJob(-1),
+      mOutputCount(0),
+      mInputCount(0),
+      mAdvancedCaptureConfigured(false),
+      mHDRBracketingEnabled(false)
+{
+    getLogLevel();
+    ATRACE_CALL();
+    mCameraDevice.common.tag = HARDWARE_DEVICE_TAG;
+    mCameraDevice.common.version = HARDWARE_DEVICE_API_VERSION(1, 0);
+    mCameraDevice.common.close = close_camera_device;
+    mCameraDevice.ops = &mCameraOps;
+    mCameraDevice.priv = this;
+
+    pthread_mutex_init(&m_lock, NULL);
+    pthread_cond_init(&m_cond, NULL);
+
+    m_apiResultList = NULL;
+
+    pthread_mutex_init(&m_evtLock, NULL);
+    pthread_cond_init(&m_evtCond, NULL);
+    memset(&m_evtResult, 0, sizeof(qcamera_api_result_t));
+
+    pthread_mutex_init(&m_parm_lock, NULL);
+
+    pthread_mutex_init(&m_int_lock, NULL);
+    pthread_cond_init(&m_int_cond, NULL);
+
+    memset(m_channels, 0, sizeof(m_channels));
+    memset(&mExifParams, 0, sizeof(mm_jpeg_exif_params_t));
+
+    memset(m_BackendFileName, 0, QCAMERA_MAX_FILEPATH_LENGTH);
+
+    memset(mDeffOngoingJobs, 0, sizeof(mDeffOngoingJobs));
+    m_perfLock.lock_init();
+
+    mDefferedWorkThread.launch(defferedWorkRoutine, this);
+    mDefferedWorkThread.sendCmd(CAMERA_CMD_TYPE_START_DATA_PROC, FALSE, FALSE);
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCamera2HardwareInterface
+ *
+ * DESCRIPTION: destructor of QCamera2HardwareInterface
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCamera2HardwareInterface::~QCamera2HardwareInterface()
+{
+    mDefferedWorkThread.sendCmd(CAMERA_CMD_TYPE_STOP_DATA_PROC, TRUE, TRUE);
+    mDefferedWorkThread.exit();
+
+    m_perfLock.lock_acq();
+    lockAPI();
+    m_smThreadActive = false;
+    unlockAPI();
+    m_stateMachine.releaseThread();
+    closeCamera();
+    m_perfLock.lock_rel();
+    m_perfLock.lock_deinit();
+    pthread_mutex_destroy(&m_lock);
+    pthread_cond_destroy(&m_cond);
+    pthread_mutex_destroy(&m_evtLock);
+    pthread_cond_destroy(&m_evtCond);
+    pthread_mutex_destroy(&m_parm_lock);
+    pthread_mutex_destroy(&m_int_lock);
+    pthread_cond_destroy(&m_int_cond);
+}
+
+/*===========================================================================
+ * FUNCTION   : openCamera
+ *
+ * DESCRIPTION: open camera
+ *
+ * PARAMETERS :
+ *   @hw_device  : double ptr for camera device struct
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::openCamera(struct hw_device_t **hw_device)
+{
+    ATRACE_CALL();
+    int rc = NO_ERROR;
+    if (mCameraOpened) {
+        *hw_device = NULL;
+        return PERMISSION_DENIED;
+    }
+    ALOGI("[KPI Perf] %s: E PROFILE_OPEN_CAMERA camera id %d",
+        __func__,mCameraId);
+    m_perfLock.lock_acq();
+    rc = openCamera();
+    if (rc == NO_ERROR){
+        *hw_device = &mCameraDevice.common;
+        if (m_thermalAdapter.init(this) != 0) {
+          ALOGE("Init thermal adapter failed");
+        }
+    }
+    else
+        *hw_device = NULL;
+
+    ALOGI("[KPI Perf] %s: X PROFILE_OPEN_CAMERA camera id %d, rc: %d",
+        __func__,mCameraId, rc);
+
+    m_perfLock.lock_rel();
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : openCamera
+ *
+ * DESCRIPTION: open camera
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::openCamera()
+{
+    int32_t l_curr_width = 0;
+    int32_t l_curr_height = 0;
+    m_max_pic_width = 0;
+    m_max_pic_height = 0;
+    size_t i;
+    int32_t rc = 0;
+
+    if (mCameraHandle) {
+        ALOGE("Failure: Camera already opened");
+        return ALREADY_EXISTS;
+    }
+    rc = camera_open((uint8_t)mCameraId, &mCameraHandle);
+    if (rc) {
+        ALOGE("camera_open failed. rc = %d, mCameraHandle = %p", rc, mCameraHandle);
+        return rc;
+    }
+    if (NULL == gCamCaps[mCameraId])
+        initCapabilities(mCameraId,mCameraHandle);
+
+    mCameraHandle->ops->register_event_notify(mCameraHandle->camera_handle,
+                                              camEvtHandle,
+                                              (void *) this);
+
+    /* get max pic size for jpeg work buf calculation*/
+    for(i = 0; i < gCamCaps[mCameraId]->picture_sizes_tbl_cnt - 1; i++)
+    {
+      l_curr_width = gCamCaps[mCameraId]->picture_sizes_tbl[i].width;
+      l_curr_height = gCamCaps[mCameraId]->picture_sizes_tbl[i].height;
+
+      if ((l_curr_width * l_curr_height) >
+        (m_max_pic_width * m_max_pic_height)) {
+        m_max_pic_width = l_curr_width;
+        m_max_pic_height = l_curr_height;
+      }
+    }
+
+    rc = m_postprocessor.init(jpegEvtHandle, this);
+    if (rc != 0) {
+        ALOGE("Init Postprocessor failed");
+        mCameraHandle->ops->close_camera(mCameraHandle->camera_handle);
+        mCameraHandle = NULL;
+        return UNKNOWN_ERROR;
+    }
+
+    // update padding info from jpeg
+    cam_padding_info_t padding_info;
+    m_postprocessor.getJpegPaddingReq(padding_info);
+    if (gCamCaps[mCameraId]->padding_info.width_padding < padding_info.width_padding) {
+        gCamCaps[mCameraId]->padding_info.width_padding = padding_info.width_padding;
+    }
+    if (gCamCaps[mCameraId]->padding_info.height_padding < padding_info.height_padding) {
+        gCamCaps[mCameraId]->padding_info.height_padding = padding_info.height_padding;
+    }
+    if (gCamCaps[mCameraId]->padding_info.plane_padding < padding_info.plane_padding) {
+        gCamCaps[mCameraId]->padding_info.plane_padding = padding_info.plane_padding;
+    }
+
+    mParameters.init(gCamCaps[mCameraId], mCameraHandle, this);
+    mParameters.setMinPpMask(gCamCaps[mCameraId]->min_required_pp_mask);
+
+    mCameraOpened = true;
+
+    return NO_ERROR;
+}
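+
+/* Open sequence above: camera_open() the mm-camera backend, cache its static
+ * capabilities on first open, register for backend events, scan the picture
+ * size table for the largest resolution (used to size the JPEG work buffer),
+ * initialize the postprocessor and fold its padding requirements into the
+ * cached capabilities, then initialize mParameters. */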
+
+/*===========================================================================
+ * FUNCTION   : closeCamera
+ *
+ * DESCRIPTION: close camera
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::closeCamera()
+{
+    int rc = NO_ERROR;
+    int i;
+
+    if (!mCameraOpened) {
+        return NO_ERROR;
+    }
+    ALOGI("[KPI Perf] %s: E PROFILE_CLOSE_CAMERA camera id %d",
+        __func__, mCameraId);
+
+    pthread_mutex_lock(&m_parm_lock);
+
+    // set open flag to false
+    mCameraOpened = false;
+
+    // Reset Stream config info
+    mParameters.setStreamConfigure(false, false, true);
+
+    // deinit Parameters
+    mParameters.deinit();
+
+    pthread_mutex_unlock(&m_parm_lock);
+
+    // exit notifier
+    m_cbNotifier.exit();
+
+    // stop and deinit postprocessor
+    waitDefferedWork(mReprocJob);
+    m_postprocessor.stop();
+    m_postprocessor.deinit();
+
+    //free all pending api results here
+    if(m_apiResultList != NULL) {
+        api_result_list *apiResultList = m_apiResultList;
+        api_result_list *apiResultListNext;
+        while (apiResultList != NULL) {
+            apiResultListNext = apiResultList->next;
+            free(apiResultList);
+            apiResultList = apiResultListNext;
+        }
+    }
+
+    m_thermalAdapter.deinit();
+
+    // delete all channels if not already deleted
+    for (i = 0; i < QCAMERA_CH_TYPE_MAX; i++) {
+        if (m_channels[i] != NULL) {
+            m_channels[i]->stop();
+            delete m_channels[i];
+            m_channels[i] = NULL;
+        }
+    }
+
+    rc = mCameraHandle->ops->close_camera(mCameraHandle->camera_handle);
+    mCameraHandle = NULL;
+    ALOGI("[KPI Perf] %s: X PROFILE_CLOSE_CAMERA camera id %d, rc: %d",
+        __func__, mCameraId, rc);
+
+    return rc;
+}
+
+#define DATA_PTR(MEM_OBJ,INDEX) MEM_OBJ->getPtr( INDEX )
+
+/*===========================================================================
+ * FUNCTION   : initCapabilities
+ *
+ * DESCRIPTION: initialize camera capabilities in static data struct
+ *
+ * PARAMETERS :
+ *   @cameraId  : camera Id
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::initCapabilities(uint32_t cameraId,
+        mm_camera_vtbl_t *cameraHandle)
+{
+    ATRACE_CALL();
+    int rc = NO_ERROR;
+    QCameraHeapMemory *capabilityHeap = NULL;
+
+    /* Allocate memory for capability buffer */
+    capabilityHeap = new QCameraHeapMemory(QCAMERA_ION_USE_CACHE);
+    rc = capabilityHeap->allocate(1, sizeof(cam_capability_t), NON_SECURE);
+    if(rc != OK) {
+        ALOGE("%s: No memory for capability", __func__);
+        goto allocate_failed;
+    }
+
+    /* Map memory for capability buffer */
+    memset(DATA_PTR(capabilityHeap,0), 0, sizeof(cam_capability_t));
+    rc = cameraHandle->ops->map_buf(cameraHandle->camera_handle,
+                                CAM_MAPPING_BUF_TYPE_CAPABILITY,
+                                capabilityHeap->getFd(0),
+                                sizeof(cam_capability_t));
+    if(rc < 0) {
+        ALOGE("%s: failed to map capability buffer", __func__);
+        goto map_failed;
+    }
+
+    /* Query Capability */
+    rc = cameraHandle->ops->query_capability(cameraHandle->camera_handle);
+    if(rc < 0) {
+        ALOGE("%s: failed to query capability",__func__);
+        goto query_failed;
+    }
+    gCamCaps[cameraId] = (cam_capability_t *)malloc(sizeof(cam_capability_t));
+    if (!gCamCaps[cameraId]) {
+        ALOGE("%s: out of memory", __func__);
+        goto query_failed;
+    }
+    memcpy(gCamCaps[cameraId], DATA_PTR(capabilityHeap,0),
+                                        sizeof(cam_capability_t));
+
+    rc = NO_ERROR;
+
+query_failed:
+    cameraHandle->ops->unmap_buf(cameraHandle->camera_handle,
+                            CAM_MAPPING_BUF_TYPE_CAPABILITY);
+map_failed:
+    capabilityHeap->deallocate();
+    delete capabilityHeap;
+allocate_failed:
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : getCapabilities
+ *
+ * DESCRIPTION: query camera capabilities
+ *
+ * PARAMETERS :
+ *   @cameraId  : camera Id
+ *   @info      : camera info struct to be filled in with camera capabilities
+ *
+ * RETURN     : int type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::getCapabilities(uint32_t cameraId,
+        struct camera_info *info)
+{
+    ATRACE_CALL();
+    int rc = NO_ERROR;
+    struct  camera_info *p_info;
+    pthread_mutex_lock(&g_camlock);
+    p_info = get_cam_info(cameraId);
+    p_info->device_version = CAMERA_DEVICE_API_VERSION_1_0;
+    p_info->static_camera_characteristics = NULL;
+    memcpy(info, p_info, sizeof (struct camera_info));
+    pthread_mutex_unlock(&g_camlock);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : getCamHalCapabilities
+ *
+ * DESCRIPTION: get the HAL capabilities structure
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : capability structure of respective camera
+ *
+ *==========================================================================*/
+cam_capability_t* QCamera2HardwareInterface::getCamHalCapabilities()
+{
+    return gCamCaps[mCameraId];
+}
+
+/*===========================================================================
+ * FUNCTION   : getBufNumRequired
+ *
+ * DESCRIPTION: return number of stream buffers needed for given stream type
+ *
+ * PARAMETERS :
+ *   @stream_type  : type of stream
+ *
+ * RETURN     : number of buffers needed
+ *==========================================================================*/
+uint8_t QCamera2HardwareInterface::getBufNumRequired(cam_stream_type_t stream_type)
+{
+    int bufferCnt = 0;
+    int minCaptureBuffers = mParameters.getNumOfSnapshots();
+    char value[PROPERTY_VALUE_MAX];
+    bool raw_yuv = false;
+
+    int zslQBuffers = mParameters.getZSLQueueDepth();
+
+    int minCircularBufNum = mParameters.getMaxUnmatchedFramesInQueue() +
+                            CAMERA_MIN_JPEG_ENCODING_BUFFERS;
+
+    int maxStreamBuf = minCaptureBuffers + mParameters.getMaxUnmatchedFramesInQueue() +
+                       mParameters.getNumOfExtraHDRInBufsIfNeeded() -
+                       mParameters.getNumOfExtraHDROutBufsIfNeeded() +
+                       mParameters.getNumOfExtraBuffersForImageProc() +
+                       EXTRA_ZSL_PREVIEW_STREAM_BUF;
+
+    int minUndequeCount = 0;
+    if (!isNoDisplayMode()) {
+        if(mPreviewWindow != NULL) {
+            if (mPreviewWindow->get_min_undequeued_buffer_count(mPreviewWindow,&minUndequeCount)
+                != 0) {
+                ALOGE("get_min_undequeued_buffer_count  failed");
+                //TODO: hardcoded because MIN_UNDEQUEUED_BUFFERS not defined
+                //minUndequeCount = BufferQueue::MIN_UNDEQUEUED_BUFFERS;
+                minUndequeCount = 2;
+            }
+        } else {
+            //preview window might not be set at this point. So, query directly
+            //from BufferQueue implementation of gralloc buffers.
+            //minUndequeCount = BufferQueue::MIN_UNDEQUEUED_BUFFERS;
+            //hardcoded because MIN_UNDEQUEUED_BUFFERS not defined. REVISIT
+            minUndequeCount = 2;
+        }
+    }
+
+    // Get buffer count for the particular stream type
+    switch (stream_type) {
+    case CAM_STREAM_TYPE_PREVIEW:
+        {
+            if (mParameters.isZSLMode()) {
+                // Add two extra streaming buffers to give flexibility in
+                // forming a matched super buffer in the ZSL queue. With only
+                // 'zslQBuffers + minCircularBufNum' buffers, preview frames
+                // are sometimes dropped at CPP and the super buffer does not
+                // form in the ZSL queue for a long time.
+
+                bufferCnt = zslQBuffers + minCircularBufNum +
+                        mParameters.getNumOfExtraBuffersForImageProc() +
+                        EXTRA_ZSL_PREVIEW_STREAM_BUF +
+                        mParameters.getNumOfExtraBuffersForPreview();
+            } else {
+                bufferCnt = CAMERA_MIN_STREAMING_BUFFERS +
+                        mParameters.getMaxUnmatchedFramesInQueue() +
+                        mParameters.getNumOfExtraBuffersForPreview();
+            }
+            bufferCnt += minUndequeCount;
+        }
+        break;
+    case CAM_STREAM_TYPE_POSTVIEW:
+        {
+            bufferCnt = minCaptureBuffers*CAMERA_PPROC_OUT_BUFFER_MULTIPLIER +
+                        mParameters.getNumOfExtraHDRInBufsIfNeeded() -
+                        mParameters.getNumOfExtraHDROutBufsIfNeeded() +
+                        mParameters.getNumOfExtraBuffersForImageProc();
+
+            if (bufferCnt > maxStreamBuf) {
+                bufferCnt = maxStreamBuf;
+            }
+            bufferCnt += minUndequeCount;
+        }
+        break;
+    case CAM_STREAM_TYPE_SNAPSHOT:
+        {
+            if (mParameters.isZSLMode() || mLongshotEnabled) {
+                if ((minCaptureBuffers == 1 || mParameters.isUbiRefocus()) &&
+                        !mLongshotEnabled) {
+                    // Single ZSL snapshot case
+                    bufferCnt = zslQBuffers + CAMERA_MIN_STREAMING_BUFFERS +
+                            mParameters.getNumOfExtraBuffersForImageProc();
+                }
+                else {
+                    // ZSL Burst or Longshot case
+                    bufferCnt = zslQBuffers + minCircularBufNum +
+                            mParameters.getNumOfExtraBuffersForImageProc();
+                }
+            } else {
+                bufferCnt = minCaptureBuffers*CAMERA_PPROC_OUT_BUFFER_MULTIPLIER +
+                            mParameters.getNumOfExtraHDRInBufsIfNeeded() -
+                            mParameters.getNumOfExtraHDROutBufsIfNeeded() +
+                            mParameters.getNumOfExtraBuffersForImageProc();
+
+                if (bufferCnt > maxStreamBuf) {
+                    bufferCnt = maxStreamBuf;
+                }
+            }
+        }
+        break;
+    case CAM_STREAM_TYPE_RAW:
+        property_get("persist.camera.raw_yuv", value, "0");
+        raw_yuv = atoi(value) > 0 ? true : false;
+
+        if (isRdiMode() || raw_yuv) {
+            CDBG_HIGH("RDI_DEBUG %s[%d]: CAM_STREAM_TYPE_RAW",
+              __func__, __LINE__);
+            bufferCnt = zslQBuffers + minCircularBufNum;
+        } else if (mParameters.isZSLMode()) {
+            bufferCnt = zslQBuffers + minCircularBufNum;
+        } else {
+            bufferCnt = minCaptureBuffers*CAMERA_PPROC_OUT_BUFFER_MULTIPLIER +
+                        mParameters.getNumOfExtraHDRInBufsIfNeeded() -
+                        mParameters.getNumOfExtraHDROutBufsIfNeeded() +
+                        mParameters.getNumOfExtraBuffersForImageProc();
+
+            if (bufferCnt > maxStreamBuf) {
+                bufferCnt = maxStreamBuf;
+            }
+        }
+        break;
+    case CAM_STREAM_TYPE_VIDEO:
+        {
+            if (mParameters.getBufBatchCount()) {
+                bufferCnt = CAMERA_MIN_VIDEO_BATCH_BUFFERS;
+            } else {
+                bufferCnt = CAMERA_MIN_VIDEO_BUFFERS;
+            }
+
+            bufferCnt += mParameters.getNumOfExtraBuffersForVideo();
+            //if it's a 4K encoding use case, then add extra buffers
+            cam_dimension_t dim;
+            mParameters.getStreamDimension(CAM_STREAM_TYPE_VIDEO, dim);
+            if (is4k2kResolution(&dim)) {
+                 //get additional buffer count
+                 property_get("vidc.enc.dcvs.extra-buff-count", value, "0");
+                 bufferCnt += atoi(value);
+            }
+            ALOGI("Buffer count is %d, width / height (%d/%d) ", bufferCnt, dim.width, dim.height);
+        }
+        break;
+    case CAM_STREAM_TYPE_METADATA:
+        {
+            if (mParameters.isZSLMode()) {
+                // MetaData buffers should be >= (Preview buffers-minUndequeCount)
+                bufferCnt = zslQBuffers + minCircularBufNum +
+                            mParameters.getNumOfExtraHDRInBufsIfNeeded() -
+                            mParameters.getNumOfExtraHDROutBufsIfNeeded() +
+                            mParameters.getNumOfExtraBuffersForImageProc() +
+                            EXTRA_ZSL_PREVIEW_STREAM_BUF;
+            } else {
+                bufferCnt = minCaptureBuffers +
+                            mParameters.getNumOfExtraHDRInBufsIfNeeded() -
+                            mParameters.getNumOfExtraHDROutBufsIfNeeded() +
+                            mParameters.getMaxUnmatchedFramesInQueue() +
+                            CAMERA_MIN_STREAMING_BUFFERS +
+                            mParameters.getNumOfExtraBuffersForImageProc();
+
+                if (bufferCnt > zslQBuffers + minCircularBufNum) {
+                    bufferCnt = zslQBuffers + minCircularBufNum;
+                }
+            }
+        }
+        break;
+    case CAM_STREAM_TYPE_OFFLINE_PROC:
+        {
+            bufferCnt = minCaptureBuffers;
+            // One of the ubifocus buffers is a miscellaneous buffer
+            if (mParameters.isUbiRefocus()) {
+                bufferCnt -= 1;
+            }
+            if (mLongshotEnabled) {
+                char prop[PROPERTY_VALUE_MAX];
+                memset(prop, 0, sizeof(prop));
+                property_get("persist.camera.longshot.stages", prop, "0");
+                int longshotStages = atoi(prop);
+                if (longshotStages > 0 && longshotStages < CAMERA_LONGSHOT_STAGES) {
+                    bufferCnt = longshotStages;
+                }
+                else {
+                    bufferCnt = CAMERA_LONGSHOT_STAGES;
+                }
+            }
+        }
+        break;
+    case CAM_STREAM_TYPE_ANALYSIS:
+    case CAM_STREAM_TYPE_DEFAULT:
+    case CAM_STREAM_TYPE_MAX:
+    default:
+        bufferCnt = 0;
+        break;
+    }
+
+    if (CAM_MAX_NUM_BUFS_PER_STREAM < bufferCnt) {
+        ALOGE("%s: Buffer count %d for stream type %d exceeds limit %d",
+                __func__, bufferCnt, stream_type, CAM_MAX_NUM_BUFS_PER_STREAM);
+        return CAM_MAX_NUM_BUFS_PER_STREAM;
+    }
+
+    return (uint8_t)bufferCnt;
+}
+
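+/*
+ * Illustrative note (not part of the original change): the ZSL preview count
+ * computed above is a plain sum of its contributions. With hypothetical
+ * values zslQBuffers = 2, minCircularBufNum = 7, two extra image-processing
+ * buffers, one extra preview buffer and minUndequeCount = 2, that branch
+ * would evaluate to:
+ *
+ *     bufferCnt = 2 + 7 + 2 + EXTRA_ZSL_PREVIEW_STREAM_BUF + 1 + 2
+ *
+ * The real inputs come from QCameraParameters and the preview window at
+ * runtime, so the numbers here are only for reading the formula.
+ */
+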
+/*===========================================================================
+ * FUNCTION   : allocateStreamBuf
+ *
+ * DESCRIPTION: allocate stream buffers
+ *
+ * PARAMETERS :
+ *   @stream_type  : type of stream
+ *   @size         : size of buffer
+ *   @stride       : stride of buffer
+ *   @scanline     : scanline of buffer
+ *   @bufferCnt    : [IN/OUT] minimum num of buffers to be allocated.
+ *                   could be modified during allocation if more buffers needed
+ *
+ * RETURN     : ptr to a memory obj that holds stream buffers.
+ *              NULL if failed
+ *==========================================================================*/
+QCameraMemory *QCamera2HardwareInterface::allocateStreamBuf(
+        cam_stream_type_t stream_type, size_t size, int stride, int scanline,
+        uint8_t &bufferCnt)
+{
+    int rc = NO_ERROR;
+    QCameraMemory *mem = NULL;
+    bool bCachedMem = QCAMERA_ION_USE_CACHE;
+    bool bPoolMem = false;
+    char value[PROPERTY_VALUE_MAX];
+    property_get("persist.camera.mem.usepool", value, "1");
+    if (atoi(value) == 1) {
+        bPoolMem = true;
+    }
+
+    // Allocate stream buffer memory object
+    switch (stream_type) {
+    case CAM_STREAM_TYPE_PREVIEW:
+        {
+            if (isNoDisplayMode()) {
+                mem = new QCameraStreamMemory(mGetMemory,
+                        bCachedMem,
+                        (bPoolMem) ? &m_memoryPool : NULL,
+                        stream_type);
+            } else {
+                cam_dimension_t dim;
+                QCameraGrallocMemory *grallocMemory =
+                    new QCameraGrallocMemory(mGetMemory);
+
+                mParameters.getStreamDimension(stream_type, dim);
+                if (grallocMemory)
+                    grallocMemory->setWindowInfo(mPreviewWindow, dim.width,
+                        dim.height, stride, scanline,
+                        mParameters.getPreviewHalPixelFormat());
+                mem = grallocMemory;
+            }
+        }
+        break;
+    case CAM_STREAM_TYPE_POSTVIEW:
+        {
+            if (isNoDisplayMode() || isPreviewRestartEnabled()) {
+                mem = new QCameraStreamMemory(mGetMemory, bCachedMem);
+            } else {
+                cam_dimension_t dim;
+                QCameraGrallocMemory *grallocMemory =
+                        new QCameraGrallocMemory(mGetMemory);
+
+                mParameters.getStreamDimension(stream_type, dim);
+                if (grallocMemory)
+                    grallocMemory->setWindowInfo(mPreviewWindow, dim.width,
+                            dim.height, stride, scanline,
+                            mParameters.getPreviewHalPixelFormat());
+                mem = grallocMemory;
+            }
+        }
+        break;
+    case CAM_STREAM_TYPE_ANALYSIS:
+    case CAM_STREAM_TYPE_SNAPSHOT:
+    case CAM_STREAM_TYPE_RAW:
+    case CAM_STREAM_TYPE_METADATA:
+    case CAM_STREAM_TYPE_OFFLINE_PROC:
+        mem = new QCameraStreamMemory(mGetMemory,
+                bCachedMem,
+                (bPoolMem) ? &m_memoryPool : NULL,
+                stream_type);
+        break;
+    case CAM_STREAM_TYPE_VIDEO:
+        {
+            property_get("persist.camera.mem.usecache", value, "0");
+            if (atoi(value) == 0) {
+                bCachedMem = QCAMERA_ION_USE_NOCACHE;
+            }
+            CDBG_HIGH("%s: video buf using cached memory = %d", __func__, bCachedMem);
+            mem = new QCameraVideoMemory(mGetMemory, bCachedMem);
+        }
+        break;
+    case CAM_STREAM_TYPE_DEFAULT:
+    case CAM_STREAM_TYPE_MAX:
+    default:
+        break;
+    }
+    if (!mem) {
+        return NULL;
+    }
+
+    if (bufferCnt > 0) {
+        if (mParameters.isSecureMode() &&
+            (stream_type == CAM_STREAM_TYPE_RAW) &&
+            (mParameters.isRdiMode())) {
+            ALOGD("%s: Allocating %d secure buffers of size %zu", __func__, bufferCnt, size);
+            rc = mem->allocate(bufferCnt, size, SECURE);
+        } else {
+            rc = mem->allocate(bufferCnt, size, NON_SECURE);
+        }
+        if (rc < 0) {
+            delete mem;
+            return NULL;
+        }
+        bufferCnt = mem->getCnt();
+    }
+    return mem;
+}
+
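+/*
+ * Illustrative sketch (not part of the original change): a stream setup path
+ * would typically combine the two helpers above, first asking how many
+ * buffers a stream type needs and then allocating them. The names `frameLen`,
+ * `stride` and `scanline` below are hypothetical values a caller would obtain
+ * from the stream's format/dimension query:
+ *
+ *     uint8_t bufCnt = getBufNumRequired(CAM_STREAM_TYPE_PREVIEW);
+ *     QCameraMemory *bufs = allocateStreamBuf(CAM_STREAM_TYPE_PREVIEW,
+ *             frameLen, stride, scanline, bufCnt);
+ *     // bufCnt may be updated to the count actually allocated
+ *
+ * allocateStreamBuf() returns NULL on failure, so the result must be checked
+ * before wiring the memory object into a stream.
+ */
+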
+/*===========================================================================
+ * FUNCTION   : allocateMoreStreamBuf
+ *
+ * DESCRIPTION: allocate more stream buffers from the memory object
+ *
+ * PARAMETERS :
+ *   @mem_obj      : memory object ptr
+ *   @size         : size of buffer
+ *   @bufferCnt    : [IN/OUT] additional number of buffers to be allocated.
+ *                   output will be the number of total buffers
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::allocateMoreStreamBuf(
+        QCameraMemory *mem_obj, size_t size, uint8_t &bufferCnt)
+{
+    int rc = NO_ERROR;
+
+    if (bufferCnt > 0) {
+        rc = mem_obj->allocateMore(bufferCnt, size);
+        bufferCnt = mem_obj->getCnt();
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : allocateMiscBuf
+ *
+ * DESCRIPTION: allocate miscellaneous buffer
+ *
+ * PARAMETERS :
+ *   @streamInfo  : stream info
+ *
+ * RETURN     : ptr to a memory obj that holds the miscellaneous buffer.
+ *              NULL if failed
+ *==========================================================================*/
+QCameraHeapMemory *QCamera2HardwareInterface::allocateMiscBuf(
+        cam_stream_info_t *streamInfo)
+{
+    int rc = NO_ERROR;
+    uint8_t bufNum = 0;
+    size_t bufSize = 0;
+    QCameraHeapMemory *miscBuf = NULL;
+    uint32_t feature_mask =
+            streamInfo->reprocess_config.pp_feature_config.feature_mask;
+
+    switch (streamInfo->stream_type) {
+    case CAM_STREAM_TYPE_OFFLINE_PROC:
+        if (CAM_QCOM_FEATURE_TRUEPORTRAIT & feature_mask) {
+            bufNum = 1;
+            bufSize = mParameters.getTPMaxMetaSize();
+        } else if (CAM_QCOM_FEATURE_REFOCUS & feature_mask) {
+            bufNum = 1;
+            bufSize = mParameters.getRefocusMaxMetaSize();
+        }
+        break;
+    default:
+        break;
+    }
+
+    if (bufNum && bufSize) {
+        miscBuf = new QCameraHeapMemory(QCAMERA_ION_USE_CACHE);
+
+        if (!miscBuf) {
+            ALOGE("%s: Unable to allocate miscBuf object", __func__);
+            return NULL;
+        }
+
+        rc = miscBuf->allocate(bufNum, bufSize, NON_SECURE);
+        if (rc < 0) {
+            ALOGE("%s: Failed to allocate misc buffer memory", __func__);
+            delete miscBuf;
+            return NULL;
+        }
+    }
+
+    return miscBuf;
+}
+
+/*===========================================================================
+ * FUNCTION   : allocateStreamInfoBuf
+ *
+ * DESCRIPTION: allocate stream info buffer
+ *
+ * PARAMETERS :
+ *   @stream_type  : type of stream
+ *
+ * RETURN     : ptr to a memory obj that holds stream info buffer.
+ *              NULL if failed
+ *==========================================================================*/
+QCameraHeapMemory *QCamera2HardwareInterface::allocateStreamInfoBuf(
+        cam_stream_type_t stream_type)
+{
+    int rc = NO_ERROR;
+    char value[PROPERTY_VALUE_MAX];
+    bool raw_yuv = false;
+
+    QCameraHeapMemory *streamInfoBuf = new QCameraHeapMemory(QCAMERA_ION_USE_CACHE);
+    if (!streamInfoBuf) {
+        ALOGE("allocateStreamInfoBuf: Unable to allocate streamInfo object");
+        return NULL;
+    }
+
+    rc = streamInfoBuf->allocate(1, sizeof(cam_stream_info_t), NON_SECURE);
+    if (rc < 0) {
+        ALOGE("allocateStreamInfoBuf: Failed to allocate stream info memory");
+        delete streamInfoBuf;
+        return NULL;
+    }
+
+    cam_stream_info_t *streamInfo = (cam_stream_info_t *)streamInfoBuf->getPtr(0);
+    memset(streamInfo, 0, sizeof(cam_stream_info_t));
+    streamInfo->stream_type = stream_type;
+    rc = mParameters.getStreamFormat(stream_type, streamInfo->fmt);
+    rc = mParameters.getStreamDimension(stream_type, streamInfo->dim);
+    rc = mParameters.getStreamRotation(stream_type, streamInfo->pp_config, streamInfo->dim);
+    streamInfo->num_bufs = getBufNumRequired(stream_type);
+    streamInfo->streaming_mode = CAM_STREAMING_MODE_CONTINUOUS;
+    streamInfo->is_secure = NON_SECURE;
+    switch (stream_type) {
+    case CAM_STREAM_TYPE_SNAPSHOT:
+        if ((mParameters.isZSLMode() && mParameters.getRecordingHintValue() != true) ||
+            mLongshotEnabled) {
+            streamInfo->streaming_mode = CAM_STREAMING_MODE_CONTINUOUS;
+        } else {
+            streamInfo->streaming_mode = CAM_STREAMING_MODE_BURST;
+            streamInfo->num_of_burst = (uint8_t)
+                    (mParameters.getNumOfSnapshots()
+                        + mParameters.getNumOfExtraHDRInBufsIfNeeded()
+                        - mParameters.getNumOfExtraHDROutBufsIfNeeded()
+                        + mParameters.getNumOfExtraBuffersForImageProc());
+        }
+        break;
+    case CAM_STREAM_TYPE_RAW:
+        property_get("persist.camera.raw_yuv", value, "0");
+        raw_yuv = atoi(value) > 0 ? true : false;
+        if ((mParameters.isZSLMode() || isRdiMode() || raw_yuv) &&
+                !mParameters.getofflineRAW()) {
+            CDBG_HIGH("RDI_DEBUG %s[%d]: CAM_STREAM_TYPE_RAW",
+              __func__, __LINE__);
+            streamInfo->streaming_mode = CAM_STREAMING_MODE_CONTINUOUS;
+        } else {
+            streamInfo->streaming_mode = CAM_STREAMING_MODE_BURST;
+            streamInfo->num_of_burst = mParameters.getNumOfSnapshots();
+        }
+        if (mParameters.isSecureMode() && mParameters.isRdiMode()) {
+            streamInfo->is_secure = SECURE;
+        } else {
+            streamInfo->is_secure = NON_SECURE;
+        }
+        break;
+    case CAM_STREAM_TYPE_POSTVIEW:
+        if (mLongshotEnabled) {
+            streamInfo->streaming_mode = CAM_STREAMING_MODE_CONTINUOUS;
+        } else {
+            streamInfo->streaming_mode = CAM_STREAMING_MODE_BURST;
+            streamInfo->num_of_burst = (uint8_t)(mParameters.getNumOfSnapshots()
+                + mParameters.getNumOfExtraHDRInBufsIfNeeded()
+                - mParameters.getNumOfExtraHDROutBufsIfNeeded()
+                + mParameters.getNumOfExtraBuffersForImageProc());
+        }
+        break;
+    case CAM_STREAM_TYPE_VIDEO:
+        streamInfo->dis_enable = mParameters.isDISEnabled();
+        if (mParameters.getBufBatchCount()) {
+            //Update stream info structure with batch mode info
+            streamInfo->streaming_mode = CAM_STREAMING_MODE_BATCH;
+            streamInfo->user_buf_info.frame_buf_cnt = mParameters.getBufBatchCount();
+            streamInfo->user_buf_info.size =
+                    (uint32_t)(sizeof(struct msm_camera_user_buf_cont_t));
+            cam_fps_range_t pFpsRange;
+            mParameters.getHfrFps(pFpsRange);
+            streamInfo->user_buf_info.frameInterval =
+                    (long)((1000/pFpsRange.video_max_fps) * 1000);
+            CDBG_HIGH("%s: Video Batch Count = %d, interval = %ld", __func__,
+                    streamInfo->user_buf_info.frame_buf_cnt,
+                    streamInfo->user_buf_info.frameInterval);
+        }
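+        // Fall through to CAM_STREAM_TYPE_PREVIEW: the IS type and secure
+        // mode handling below also applies to video streams.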
+    case CAM_STREAM_TYPE_PREVIEW:
+        if (mParameters.getRecordingHintValue()) {
+            const char* dis_param = mParameters.get(QCameraParameters::KEY_QC_DIS);
+            bool disEnabled = (dis_param != NULL)
+                    && !strcmp(dis_param,QCameraParameters::VALUE_ENABLE);
+            if(disEnabled) {
+                streamInfo->is_type = mParameters.getISType();
+            } else {
+                streamInfo->is_type = IS_TYPE_NONE;
+            }
+        }
+        if (mParameters.isSecureMode()) {
+            streamInfo->is_secure = SECURE;
+        }
+        break;
+    case CAM_STREAM_TYPE_ANALYSIS:
+        streamInfo->noFrameExpected = 1;
+        break;
+    default:
+        break;
+    }
+
+    // Update feature mask
+    mParameters.updatePpFeatureMask(stream_type);
+
+    // Get feature mask
+    mParameters.getStreamPpMask(stream_type, streamInfo->pp_config.feature_mask);
+
+    // Update pp config
+    if (streamInfo->pp_config.feature_mask & CAM_QCOM_FEATURE_FLIP) {
+        int flipMode = mParameters.getFlipMode(stream_type);
+        if (flipMode > 0) {
+            streamInfo->pp_config.flip = (uint32_t)flipMode;
+        }
+    }
+    if (streamInfo->pp_config.feature_mask & CAM_QCOM_FEATURE_SHARPNESS) {
+        streamInfo->pp_config.sharpness = mParameters.getInt(QCameraParameters::KEY_QC_SHARPNESS);
+    }
+    if (streamInfo->pp_config.feature_mask & CAM_QCOM_FEATURE_EFFECT) {
+        streamInfo->pp_config.effect = mParameters.getEffectValue();
+    }
+
+    if (streamInfo->pp_config.feature_mask & CAM_QCOM_FEATURE_DENOISE2D) {
+        streamInfo->pp_config.denoise2d.denoise_enable = 1;
+        streamInfo->pp_config.denoise2d.process_plates =
+                mParameters.getDenoiseProcessPlate(CAM_INTF_PARM_WAVELET_DENOISE);
+    }
+
+    if (!((needReprocess()) && (CAM_STREAM_TYPE_SNAPSHOT == stream_type ||
+            CAM_STREAM_TYPE_RAW == stream_type))) {
+        if (gCamCaps[mCameraId]->min_required_pp_mask & CAM_QCOM_FEATURE_CROP)
+            streamInfo->pp_config.feature_mask |= CAM_QCOM_FEATURE_CROP;
+        if (gCamCaps[mCameraId]->min_required_pp_mask & CAM_QCOM_FEATURE_SCALE)
+            streamInfo->pp_config.feature_mask |= CAM_QCOM_FEATURE_SCALE;
+    }
+
+    CDBG_HIGH("%s: stream type: %d, pp_mask: 0x%x",
+            __func__, stream_type, streamInfo->pp_config.feature_mask);
+
+    return streamInfoBuf;
+}
+
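+/*
+ * Illustrative note (not part of the original change): the batch-mode frame
+ * interval set above is derived from the configured HFR fps as
+ * (1000 / video_max_fps) * 1000 microseconds. For a hypothetical 120 fps HFR
+ * setting that works out to roughly:
+ *
+ *     frameInterval = (1000 / 120.0) * 1000  ~= 8333 us per frame
+ *
+ * The actual fps range is whatever QCameraParameters::getHfrFps() reports.
+ */
+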
+/*===========================================================================
+ * FUNCTION   : allocateStreamUserBuf
+ *
+ * DESCRIPTION: allocate user ptr for stream buffers
+ *
+ * PARAMETERS :
+ *   @streamInfo  : stream info structure
+ *
+ * RETURN     : ptr to a memory obj that holds stream user buffers.
+ *              NULL if failed
+ *==========================================================================*/
+QCameraMemory *QCamera2HardwareInterface::allocateStreamUserBuf(
+        cam_stream_info_t *streamInfo)
+{
+    int rc = NO_ERROR;
+    QCameraMemory *mem = NULL;
+    int bufferCnt = 0;
+    int size = 0;
+
+    if (streamInfo->streaming_mode != CAM_STREAMING_MODE_BATCH) {
+        ALOGE("%s: Stream is not in BATCH mode. Invalid Stream", __func__);
+        return NULL;
+    }
+
+    // Allocate stream user buffer memory object
+    switch (streamInfo->stream_type) {
+    case CAM_STREAM_TYPE_VIDEO: {
+        QCameraVideoMemory *video_mem = new QCameraVideoMemory(
+                mGetMemory, FALSE, CAM_STREAM_BUF_TYPE_USERPTR);
+        video_mem->allocateMeta(streamInfo->num_bufs);
+        mem = static_cast<QCameraMemory *>(video_mem);
+    }
+    break;
+
+    case CAM_STREAM_TYPE_PREVIEW:
+    case CAM_STREAM_TYPE_POSTVIEW:
+    case CAM_STREAM_TYPE_ANALYSIS:
+    case CAM_STREAM_TYPE_SNAPSHOT:
+    case CAM_STREAM_TYPE_RAW:
+    case CAM_STREAM_TYPE_METADATA:
+    case CAM_STREAM_TYPE_OFFLINE_PROC:
+    case CAM_STREAM_TYPE_CALLBACK:
+        ALOGE("%s: Stream type not supported for BATCH processing", __func__);
+    break;
+
+    case CAM_STREAM_TYPE_DEFAULT:
+    case CAM_STREAM_TYPE_MAX:
+    default:
+        break;
+    }
+    if (!mem) {
+        ALOGE("%s: Failed to allocate mem", __func__);
+        return NULL;
+    }
+
+    /* Size of this allocation covers all batch buffer containers */
+    size = PAD_TO_SIZE((streamInfo->num_bufs * streamInfo->user_buf_info.size),
+            CAM_PAD_TO_4K);
+
+    CDBG_HIGH("%s: Allocating BATCH Buffer count = %d", __func__, streamInfo->num_bufs);
+
+    if (size > 0) {
+        // Allocating one buffer for all batch buffers
+        rc = mem->allocate(1, size, NON_SECURE);
+        if (rc < 0) {
+            delete mem;
+            return NULL;
+        }
+    }
+    return mem;
+}
+
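+/*
+ * Illustrative note (not part of the original change): the single user-buffer
+ * allocation above covers every batch container and is padded up to a 4K
+ * boundary. Assuming, purely for illustration, 8 batch buffers and a 40-byte
+ * per-container size, the computation would be:
+ *
+ *     size = PAD_TO_SIZE(8 * 40, CAM_PAD_TO_4K)
+ *          = PAD_TO_SIZE(320, 4K) = 4096
+ *
+ * The real per-container size is sizeof(struct msm_camera_user_buf_cont_t),
+ * filled in by allocateStreamInfoBuf() above.
+ */
+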
+
+/*===========================================================================
+ * FUNCTION   : setPreviewWindow
+ *
+ * DESCRIPTION: set preview window impl
+ *
+ * PARAMETERS :
+ *   @window  : ptr to window ops table struct
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::setPreviewWindow(
+        struct preview_stream_ops *window)
+{
+    mPreviewWindow = window;
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setCallBacks
+ *
+ * DESCRIPTION: set callbacks impl
+ *
+ * PARAMETERS :
+ *   @notify_cb  : notify cb
+ *   @data_cb    : data cb
+ *   @data_cb_timestamp : data cb with time stamp
+ *   @get_memory : request memory ops table
+ *   @user       : user data ptr
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::setCallBacks(camera_notify_callback notify_cb,
+                                            camera_data_callback data_cb,
+                                            camera_data_timestamp_callback data_cb_timestamp,
+                                            camera_request_memory get_memory,
+                                            void *user)
+{
+    mNotifyCb        = notify_cb;
+    mDataCb          = data_cb;
+    mDataCbTimestamp = data_cb_timestamp;
+    mGetMemory       = get_memory;
+    mCallbackCookie  = user;
+    m_cbNotifier.setCallbacks(notify_cb, data_cb, data_cb_timestamp, user);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : enableMsgType
+ *
+ * DESCRIPTION: enable msg type impl
+ *
+ * PARAMETERS :
+ *   @msg_type  : msg type mask to be enabled
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::enableMsgType(int32_t msg_type)
+{
+    mMsgEnabled |= msg_type;
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : disableMsgType
+ *
+ * DESCRIPTION: disable msg type impl
+ *
+ * PARAMETERS :
+ *   @msg_type  : msg type mask to be disabled
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::disableMsgType(int32_t msg_type)
+{
+    mMsgEnabled &= ~msg_type;
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : msgTypeEnabled
+ *
+ * DESCRIPTION: impl to determine if certain msg_type is enabled
+ *
+ * PARAMETERS :
+ *   @msg_type  : msg type mask
+ *
+ * RETURN     : 0 -- not enabled
+ *              non-zero -- enabled
+ *==========================================================================*/
+int QCamera2HardwareInterface::msgTypeEnabled(int32_t msg_type)
+{
+    return (mMsgEnabled & msg_type);
+}
+
+/*===========================================================================
+ * FUNCTION   : msgTypeEnabledWithLock
+ *
+ * DESCRIPTION: impl to determine if certain msg_type is enabled with lock
+ *
+ * PARAMETERS :
+ *   @msg_type  : msg type mask
+ *
+ * RETURN     : 0 -- not enabled
+ *              non-zero -- enabled
+ *==========================================================================*/
+int QCamera2HardwareInterface::msgTypeEnabledWithLock(int32_t msg_type)
+{
+    int enabled = 0;
+    lockAPI();
+    enabled = mMsgEnabled & msg_type;
+    unlockAPI();
+    return enabled;
+}
+
+/*===========================================================================
+ * FUNCTION   : startPreview
+ *
+ * DESCRIPTION: start preview impl
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::startPreview()
+{
+    ATRACE_CALL();
+    int32_t rc = NO_ERROR;
+    CDBG_HIGH("%s: E", __func__);
+    // start preview stream
+    if (mParameters.isZSLMode() && mParameters.getRecordingHintValue() != true) {
+        rc = startChannel(QCAMERA_CH_TYPE_ZSL);
+    } else {
+        rc = startChannel(QCAMERA_CH_TYPE_PREVIEW);
+        /*
+          CAF needs a cancel auto focus call to resume after snapshot,
+          since focus stays locked until take picture is done.
+          So in the non-ZSL case, if the focus mode is CAF, call cancel
+          auto focus to resume CAF.
+        */
+        cam_focus_mode_type focusMode = mParameters.getFocusMode();
+        if (focusMode == CAM_FOCUS_MODE_CONTINOUS_PICTURE)
+            mCameraHandle->ops->cancel_auto_focus(mCameraHandle->camera_handle);
+    }
+    updatePostPreviewParameters();
+    CDBG_HIGH("%s: X", __func__);
+    return rc;
+}
+
+int32_t QCamera2HardwareInterface::updatePostPreviewParameters() {
+    // Enable OIS only in Camera mode and 4k2k camcorder mode
+    int32_t rc = NO_ERROR;
+    rc = mParameters.updateOisValue(1);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : stopPreview
+ *
+ * DESCRIPTION: stop preview impl
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::stopPreview()
+{
+    ATRACE_CALL();
+    CDBG_HIGH("%s: E", __func__);
+    // stop preview stream
+    stopChannel(QCAMERA_CH_TYPE_ZSL);
+    stopChannel(QCAMERA_CH_TYPE_PREVIEW);
+
+    m_cbNotifier.flushPreviewNotifications();
+    // delete all channels from preparePreview
+    unpreparePreview();
+    CDBG_HIGH("%s: X", __func__);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : storeMetaDataInBuffers
+ *
+ * DESCRIPTION: enable store meta data in buffers for video frames impl
+ *
+ * PARAMETERS :
+ *   @enable  : flag if need enable
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::storeMetaDataInBuffers(int enable)
+{
+    mStoreMetaDataInFrame = enable;
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : startRecording
+ *
+ * DESCRIPTION: start recording impl
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::startRecording()
+{
+    int32_t rc = NO_ERROR;
+    CDBG_HIGH("%s: E", __func__);
+    if (mParameters.getRecordingHintValue() == false) {
+        ALOGE("%s: start recording when hint is false, stop preview first", __func__);
+        stopPreview();
+
+        // Set recording hint to TRUE
+        mParameters.updateRecordingHintValue(TRUE);
+        rc = preparePreview();
+        if (rc == NO_ERROR) {
+            rc = startChannel(QCAMERA_CH_TYPE_PREVIEW);
+        }
+    }
+
+    if (rc == NO_ERROR) {
+        rc = startChannel(QCAMERA_CH_TYPE_VIDEO);
+    }
+
+    if (rc == NO_ERROR) {
+        // Set power Hint for video encoding
+        m_perfLock.powerHint(POWER_HINT_VIDEO_ENCODE, 1);
+    }
+
+    CDBG_HIGH("%s: X", __func__);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : stopRecording
+ *
+ * DESCRIPTION: stop recording impl
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::stopRecording()
+{
+    CDBG_HIGH("%s: E", __func__);
+    int rc = stopChannel(QCAMERA_CH_TYPE_VIDEO);
+
+    if (rc == NO_ERROR) {
+        // Disable power Hint
+        m_perfLock.powerHint(POWER_HINT_VIDEO_ENCODE, 0);
+    }
+    CDBG_HIGH("%s: X", __func__);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseRecordingFrame
+ *
+ * DESCRIPTION: return video frame impl
+ *
+ * PARAMETERS :
+ *   @opaque  : ptr to video frame to be returned
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::releaseRecordingFrame(const void * opaque)
+{
+    int32_t rc = UNKNOWN_ERROR;
+    QCameraVideoChannel *pChannel =
+        (QCameraVideoChannel *)m_channels[QCAMERA_CH_TYPE_VIDEO];
+    CDBG_HIGH("%s: opaque data = %p", __func__,opaque);
+    if(pChannel != NULL) {
+        rc = pChannel->releaseFrame(opaque, mStoreMetaDataInFrame > 0);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : autoFocus
+ *
+ * DESCRIPTION: start auto focus impl
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::autoFocus()
+{
+    int rc = NO_ERROR;
+    setCancelAutoFocus(false);
+    cam_focus_mode_type focusMode = mParameters.getFocusMode();
+
+    switch (focusMode) {
+    case CAM_FOCUS_MODE_AUTO:
+    case CAM_FOCUS_MODE_MACRO:
+    case CAM_FOCUS_MODE_CONTINOUS_VIDEO:
+    case CAM_FOCUS_MODE_CONTINOUS_PICTURE:
+        rc = mCameraHandle->ops->do_auto_focus(mCameraHandle->camera_handle);
+        break;
+    case CAM_FOCUS_MODE_INFINITY:
+    case CAM_FOCUS_MODE_FIXED:
+    case CAM_FOCUS_MODE_EDOF:
+    default:
+        ALOGE("%s: No ops in focusMode (%d)", __func__, focusMode);
+        rc = sendEvtNotify(CAMERA_MSG_FOCUS, true, 0);
+        break;
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : cancelAutoFocus
+ *
+ * DESCRIPTION: cancel auto focus impl
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::cancelAutoFocus()
+{
+    int rc = NO_ERROR;
+    setCancelAutoFocus(true);
+    cam_focus_mode_type focusMode = mParameters.getFocusMode();
+
+    switch (focusMode) {
+    case CAM_FOCUS_MODE_AUTO:
+    case CAM_FOCUS_MODE_MACRO:
+    case CAM_FOCUS_MODE_CONTINOUS_VIDEO:
+    case CAM_FOCUS_MODE_CONTINOUS_PICTURE:
+        rc = mCameraHandle->ops->cancel_auto_focus(mCameraHandle->camera_handle);
+        break;
+    case CAM_FOCUS_MODE_INFINITY:
+    case CAM_FOCUS_MODE_FIXED:
+    case CAM_FOCUS_MODE_EDOF:
+    default:
+        CDBG("%s: No ops in focusMode (%d)", __func__, focusMode);
+        break;
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : processUFDumps
+ *
+ * DESCRIPTION: process UF jpeg dumps for refocus support
+ *
+ * PARAMETERS :
+ *   @evt     : payload of jpeg event, including information about jpeg encoding
+ *              status, jpeg size and so on.
+ *
+ * RETURN     : true  -- no more UF output images are expected
+ *              false -- more UF output images are still expected
+ *
+ * NOTE       : none
+ *==========================================================================*/
+bool QCamera2HardwareInterface::processUFDumps(qcamera_jpeg_evt_payload_t *evt)
+{
+   bool ret = true;
+   if (mParameters.isUbiRefocus()) {
+       int index = (int)getOutputImageCount();
+       bool allFocusImage = (index == ((int)mParameters.getRefocusOutputCount() - 1));
+       char name[FILENAME_MAX];
+
+       camera_memory_t *jpeg_mem = NULL;
+       omx_jpeg_ouput_buf_t *jpeg_out = NULL;
+       size_t dataLen;
+       uint8_t *dataPtr;
+       if (!m_postprocessor.getJpegMemOpt()) {
+           dataLen = evt->out_data.buf_filled_len;
+           dataPtr = evt->out_data.buf_vaddr;
+       } else {
+           jpeg_out  = (omx_jpeg_ouput_buf_t*) evt->out_data.buf_vaddr;
+           if (!jpeg_out) {
+              ALOGE("%s:%d] Null pointer detected",  __func__, __LINE__);
+              return false;
+           }
+           jpeg_mem = (camera_memory_t *)jpeg_out->mem_hdl;
+           if (!jpeg_mem) {
+              ALOGE("%s:%d] Null pointer detected",  __func__, __LINE__);
+              return false;
+           }
+           dataPtr = (uint8_t *)jpeg_mem->data;
+           dataLen = jpeg_mem->size;
+       }
+
+       if (allFocusImage)  {
+           snprintf(name, sizeof(name), "AllFocusImage");
+           index = -1;
+       } else {
+           snprintf(name, sizeof(name), "%d", 0);
+       }
+       CAM_DUMP_TO_FILE(QCAMERA_DUMP_FRM_LOCATION"ubifocus", name, index, "jpg",
+           dataPtr, dataLen);
+       CDBG("%s:%d] Dump the image %d %d allFocusImage %d", __func__, __LINE__,
+           getOutputImageCount(), index, allFocusImage);
+       setOutputImageCount(getOutputImageCount() + 1);
+       if (!allFocusImage) {
+           ret = false;
+       }
+   }
+   return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : unconfigureAdvancedCapture
+ *
+ * DESCRIPTION: unconfigure Advanced Capture.
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::unconfigureAdvancedCapture()
+{
+    int32_t rc = NO_ERROR;
+
+    if (mAdvancedCaptureConfigured) {
+
+        mAdvancedCaptureConfigured = false;
+
+        if(mIs3ALocked) {
+            mParameters.set3ALock(QCameraParameters::VALUE_FALSE);
+            mIs3ALocked = false;
+        }
+        if (mParameters.isHDREnabled() || mParameters.isAEBracketEnabled()) {
+            rc = mParameters.setToneMapMode(true, true);
+            if (rc != NO_ERROR) {
+                CDBG_HIGH("%s: Failed to enable tone map during HDR/AEBracketing", __func__);
+            }
+            mHDRBracketingEnabled = false;
+            rc = mParameters.stopAEBracket();
+        } else if (mParameters.isChromaFlashEnabled()) {
+            rc = mParameters.resetFrameCapture(TRUE);
+        } else if (mParameters.isUbiFocusEnabled() || mParameters.isUbiRefocus()) {
+            rc = configureAFBracketing(false);
+        } else if (mParameters.isOptiZoomEnabled()) {
+            rc = mParameters.setAndCommitZoom(mZoomLevel);
+        } else if (mParameters.isStillMoreEnabled()) {
+            cam_still_more_t stillmore_config = mParameters.getStillMoreSettings();
+            stillmore_config.burst_count = 0;
+            mParameters.setStillMoreSettings(stillmore_config);
+
+            /* If SeeMore is running, it will handle re-enabling tone map */
+            if (!mParameters.isSeeMoreEnabled()) {
+                rc = mParameters.setToneMapMode(true, true);
+                if (rc != NO_ERROR) {
+                    CDBG_HIGH("%s: Failed to enable tone map during StillMore", __func__);
+                }
+            }
+
+            /* Re-enable Tintless */
+            mParameters.setTintless(true);
+        } else {
+            ALOGE("%s: No Advanced Capture feature enabled!! ", __func__);
+            rc = BAD_VALUE;
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : configureAdvancedCapture
+ *
+ * DESCRIPTION: configure Advanced Capture.
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::configureAdvancedCapture()
+{
+    CDBG_HIGH("%s: E",__func__);
+    int32_t rc = NO_ERROR;
+
+    setOutputImageCount(0);
+    mInputCount = 0;
+
+    /* Temporarily stop display only if not in stillmore livesnapshot */
+    if (!(mParameters.isStillMoreEnabled() &&
+            mParameters.isSeeMoreEnabled())) {
+        mParameters.setDisplayFrame(FALSE);
+    }
+
+    if (mParameters.isUbiFocusEnabled() || mParameters.isUbiRefocus()) {
+        rc = configureAFBracketing();
+    } else if (mParameters.isOptiZoomEnabled()) {
+        rc = configureOptiZoom();
+    } else if (mParameters.isChromaFlashEnabled()) {
+        rc = mParameters.configFrameCapture(TRUE);
+    } else if(mParameters.isHDREnabled()) {
+        rc = configureHDRBracketing();
+        if (mHDRBracketingEnabled) {
+            rc = mParameters.setToneMapMode(false, true);
+            if (rc != NO_ERROR) {
+                CDBG_HIGH("%s: Failed to disable tone map during HDR", __func__);
+            }
+        }
+    } else if (mParameters.isAEBracketEnabled()) {
+        rc = mParameters.setToneMapMode(false, true);
+        if (rc != NO_ERROR) {
+            CDBG_HIGH("%s: Failed to disable tone map during AEBracketing", __func__);
+        }
+        rc = configureAEBracketing();
+    } else if (mParameters.isStillMoreEnabled()) {
+        rc = configureStillMore();
+    } else {
+        ALOGE("%s: No Advanced Capture feature enabled!! ", __func__);
+        rc = BAD_VALUE;
+    }
+
+    if (NO_ERROR == rc) {
+        mAdvancedCaptureConfigured = true;
+    } else {
+        mAdvancedCaptureConfigured = false;
+    }
+
+    CDBG_HIGH("%s: X",__func__);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : configureAFBracketing
+ *
+ * DESCRIPTION: configure AF Bracketing.
+ *
+ * PARAMETERS :
+ *   @enable  : flag indicating whether to enable or disable AF bracketing
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::configureAFBracketing(bool enable)
+{
+    CDBG_HIGH("%s: E",__func__);
+    int32_t rc = NO_ERROR;
+    cam_af_bracketing_t *af_bracketing_need;
+
+    if (mParameters.isUbiRefocus()) {
+        af_bracketing_need =
+                &gCamCaps[mCameraId]->refocus_af_bracketing_need;
+    } else {
+        af_bracketing_need =
+                &gCamCaps[mCameraId]->ubifocus_af_bracketing_need;
+    }
+
+    //Enable AF Bracketing.
+    cam_af_bracketing_t afBracket;
+    memset(&afBracket, 0, sizeof(cam_af_bracketing_t));
+    afBracket.enable = enable;
+    afBracket.burst_count = af_bracketing_need->burst_count;
+
+    for(int8_t i = 0; i < MAX_AF_BRACKETING_VALUES; i++) {
+        afBracket.focus_steps[i] = af_bracketing_need->focus_steps[i];
+        CDBG_HIGH("%s: focus_step[%d] = %d", __func__, i, afBracket.focus_steps[i]);
+    }
+    //Send cmd to backend to set AF Bracketing for Ubi Focus.
+    rc = mParameters.commitAFBracket(afBracket);
+    if ( NO_ERROR != rc ) {
+        ALOGE("%s: cannot configure AF bracketing", __func__);
+        return rc;
+    }
+    if (enable) {
+        mParameters.set3ALock(QCameraParameters::VALUE_TRUE);
+        mIs3ALocked = true;
+    }
+    CDBG_HIGH("%s: X",__func__);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : configureHDRBracketing
+ *
+ * DESCRIPTION: configure HDR Bracketing.
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::configureHDRBracketing()
+{
+    CDBG_HIGH("%s: E",__func__);
+    int32_t rc = NO_ERROR;
+
+    // 'values' should be in "idx1,idx2,idx3,..." format
+    uint32_t hdrFrameCount = gCamCaps[mCameraId]->hdr_bracketing_setting.num_frames;
+    CDBG_HIGH("%s : HDR values %d, %d frame count: %u",
+          __func__,
+          (int8_t) gCamCaps[mCameraId]->hdr_bracketing_setting.exp_val.values[0],
+          (int8_t) gCamCaps[mCameraId]->hdr_bracketing_setting.exp_val.values[1],
+          hdrFrameCount);
+
+    // Enable AE Bracketing for HDR
+    cam_exp_bracketing_t aeBracket;
+    memset(&aeBracket, 0, sizeof(cam_exp_bracketing_t));
+    aeBracket.mode =
+        gCamCaps[mCameraId]->hdr_bracketing_setting.exp_val.mode;
+
+    if (aeBracket.mode == CAM_EXP_BRACKETING_ON) {
+        mHDRBracketingEnabled = true;
+    }
+
+    String8 tmp;
+    for (uint32_t i = 0; i < hdrFrameCount; i++) {
+        tmp.appendFormat("%d",
+            (int8_t) gCamCaps[mCameraId]->hdr_bracketing_setting.exp_val.values[i]);
+        tmp.append(",");
+    }
+    if (mParameters.isHDR1xFrameEnabled()
+        && mParameters.isHDR1xExtraBufferNeeded()) {
+            tmp.appendFormat("%d", 0);
+            tmp.append(",");
+    }
+
+    if( !tmp.isEmpty() &&
+        ( MAX_EXP_BRACKETING_LENGTH > tmp.length() ) ) {
+        //Trim last comma
+        memset(aeBracket.values, '\0', MAX_EXP_BRACKETING_LENGTH);
+        memcpy(aeBracket.values, tmp.string(), tmp.length() - 1);
+    }
+
+    CDBG_HIGH("%s : HDR config values %s",
+          __func__,
+          aeBracket.values);
+    rc = mParameters.setHDRAEBracket(aeBracket);
+    if ( NO_ERROR != rc ) {
+        ALOGE("%s: cannot configure HDR bracketing", __func__);
+        return rc;
+    }
+    CDBG_HIGH("%s: X",__func__);
+    return rc;
+}
+
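+/*
+ * Illustrative note (not part of the original change): the exposure string is
+ * assembled value-by-value with a trailing comma and then trimmed. With
+ * hypothetical bracketing values of -6 and 6 plus an extra 0-EV frame for the
+ * HDR 1x buffer, the intermediate and final strings would be:
+ *
+ *     after loop      : "-6,6,"
+ *     after 1x append : "-6,6,0,"
+ *     after trim      : "-6,6,0"
+ *
+ * The real values come from hdr_bracketing_setting in the camera capability.
+ */
+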
+/*===========================================================================
+ * FUNCTION   : configureAEBracketing
+ *
+ * DESCRIPTION: configure AE Bracketing.
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::configureAEBracketing()
+{
+    CDBG_HIGH("%s: E",__func__);
+    int32_t rc = NO_ERROR;
+
+    rc = mParameters.setAEBracketing();
+    if ( NO_ERROR != rc ) {
+        ALOGE("%s: cannot configure AE bracketing", __func__);
+        return rc;
+    }
+    CDBG_HIGH("%s: X",__func__);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : configureOptiZoom
+ *
+ * DESCRIPTION: configure Opti Zoom.
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              non-zero failure code
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::configureOptiZoom()
+{
+    int32_t rc = NO_ERROR;
+
+    //store current zoom level.
+    mZoomLevel = mParameters.getParmZoomLevel();
+
+    //set zoom level to 1x;
+    mParameters.setAndCommitZoom(0);
+
+    mParameters.set3ALock(QCameraParameters::VALUE_TRUE);
+    mIs3ALocked = true;
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : configureStillMore
+ *
+ * DESCRIPTION: configure StillMore.
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::configureStillMore()
+{
+    int32_t rc = NO_ERROR;
+    uint8_t burst_cnt = 0;
+    cam_still_more_t stillmore_config;
+    cam_still_more_t stillmore_cap;
+
+    /* Disable Tone Map. If seemore is enabled, it will handle disabling it. */
+    if (!mParameters.isSeeMoreEnabled()) {
+        rc = mParameters.setToneMapMode(false, true);
+        if (rc != NO_ERROR) {
+            CDBG_HIGH("%s: Failed to disable tone map during StillMore", __func__);
+        }
+    }
+
+    /* Lock 3A */
+    mParameters.set3ALock(QCameraParameters::VALUE_TRUE);
+    mIs3ALocked = true;
+
+    /* Disable Tintless */
+    mParameters.setTintless(false);
+
+    /* Configure burst count based on user input */
+    char prop[PROPERTY_VALUE_MAX];
+    property_get("persist.camera.imglib.stillmore", prop, "0");
+    burst_cnt = (uint32_t)atoi(prop);
+
+    /* In the case of liveshot, burst should be 1 */
+    if (mParameters.isSeeMoreEnabled()) {
+        burst_cnt = 1;
+    }
+
+    /* Validate burst count */
+    stillmore_cap = mParameters.getStillMoreCapability();
+    if ((burst_cnt < stillmore_cap.min_burst_count) ||
+            (burst_cnt > stillmore_cap.max_burst_count)) {
+        burst_cnt = stillmore_cap.max_burst_count;
+    }
+
+    memset(&stillmore_config, 0, sizeof(cam_still_more_t));
+    stillmore_config.burst_count = burst_cnt;
+    mParameters.setStillMoreSettings(stillmore_config);
+
+    CDBG_HIGH("%s: Stillmore burst %d", __func__, burst_cnt);
+
+    return rc;
+}
+
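+/*
+ * Illustrative note (not part of the original change): with the default
+ * persist.camera.imglib.stillmore value of "0", the requested burst is below
+ * the capability's minimum (assuming it reports a minimum of at least 1), so
+ * the validation above falls back to stillmore_cap.max_burst_count. A SeeMore
+ * liveshot requests a burst of 1 before the same clamp is applied, and only a
+ * property value inside [min_burst_count, max_burst_count] is kept as-is.
+ */
+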
+/*===========================================================================
+ * FUNCTION   : stopAdvancedCapture
+ *
+ * DESCRIPTION: stops advanced capture based on capture type
+ *
+ * PARAMETERS :
+ *   @pChannel : channel.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::stopAdvancedCapture(
+        QCameraPicChannel *pChannel)
+{
+    CDBG_HIGH("%s: stop bracketing", __func__);
+    int32_t rc = NO_ERROR;
+
+    if(mParameters.isUbiFocusEnabled() || mParameters.isUbiRefocus()) {
+        rc = pChannel->stopAdvancedCapture(MM_CAMERA_AF_BRACKETING);
+    } else if (mParameters.isChromaFlashEnabled()) {
+        rc = pChannel->stopAdvancedCapture(MM_CAMERA_FRAME_CAPTURE);
+    } else if(mParameters.isHDREnabled()
+            || mParameters.isAEBracketEnabled()) {
+        rc = pChannel->stopAdvancedCapture(MM_CAMERA_AE_BRACKETING);
+    } else if (mParameters.isOptiZoomEnabled()) {
+        rc = pChannel->stopAdvancedCapture(MM_CAMERA_ZOOM_1X);
+    } else if (mParameters.isStillMoreEnabled()) {
+        CDBG_HIGH("%s: stopAdvancedCapture not needed for StillMore", __func__);
+    } else {
+        ALOGE("%s: No Advanced Capture feature enabled!",__func__);
+        rc = BAD_VALUE;
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : startAdvancedCapture
+ *
+ * DESCRIPTION: starts advanced capture based on capture type
+ *
+ * PARAMETERS :
+ *   @pChannel : channel.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::startAdvancedCapture(
+        QCameraPicChannel *pChannel)
+{
+    CDBG_HIGH("%s: Start bracketing",__func__);
+    int32_t rc = NO_ERROR;
+
+    if(mParameters.isUbiFocusEnabled() || mParameters.isUbiRefocus()) {
+        rc = pChannel->startAdvancedCapture(MM_CAMERA_AF_BRACKETING);
+    } else if (mParameters.isOptiZoomEnabled()) {
+        rc = pChannel->startAdvancedCapture(MM_CAMERA_ZOOM_1X);
+    } else if (mParameters.isStillMoreEnabled()) {
+        CDBG_HIGH("%s: startAdvancedCapture not needed for StillMore", __func__);
+    } else if (mParameters.isHDREnabled()
+            || mParameters.isAEBracketEnabled()) {
+        rc = pChannel->startAdvancedCapture(MM_CAMERA_AE_BRACKETING);
+    } else if (mParameters.isChromaFlashEnabled()) {
+        cam_capture_frame_config_t config = mParameters.getCaptureFrameConfig();
+        rc = pChannel->startAdvancedCapture(MM_CAMERA_FRAME_CAPTURE, &config);
+    } else {
+        ALOGE("%s: No Advanced Capture feature enabled!",__func__);
+        rc = BAD_VALUE;
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : takePicture
+ *
+ * DESCRIPTION: take picture impl
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::takePicture()
+{
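+    // Three capture paths are handled below: (1) ZSL capture on the existing
+    // ZSL channel, (2) non-ZSL JPEG/NV16/NV21 capture on a dedicated capture
+    // channel, and (3) RAW-only capture on the RAW channel.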
+    int rc = NO_ERROR;
+
+    // Get total number of snapshots (retro + regular)
+    uint8_t numSnapshots = mParameters.getNumOfSnapshots();
+    // Get number of retro-active snapshots
+    uint8_t numRetroSnapshots = mParameters.getNumOfRetroSnapshots();
+    CDBG_HIGH("%s: E", __func__);
+
+    //Set rotation value from user settings as Jpeg rotation
+    //to configure back-end modules.
+    mParameters.setJpegRotation(mParameters.getRotation());
+
+    // Check if retro-active snapshots are not enabled
+    if (!isRetroPicture() || !mParameters.isZSLMode()) {
+      numRetroSnapshots = 0;
+      CDBG_HIGH("%s: [ZSL Retro] Reset retro snaphot count to zero", __func__);
+    }
+    if (mParameters.isUbiFocusEnabled() ||
+            mParameters.isUbiRefocus() ||
+            mParameters.isOptiZoomEnabled() ||
+            mParameters.isHDREnabled() ||
+            mParameters.isChromaFlashEnabled() ||
+            mParameters.isAEBracketEnabled() ||
+            mParameters.isStillMoreEnabled()) {
+        rc = configureAdvancedCapture();
+        if (rc == NO_ERROR) {
+            numSnapshots = mParameters.getBurstCountForAdvancedCapture();
+        }
+    }
+    CDBG_HIGH("%s: [ZSL Retro] numSnapshots = %d, numRetroSnapshots = %d",
+          __func__, numSnapshots, numRetroSnapshots);
+
+    if (mParameters.isZSLMode()) {
+        QCameraPicChannel *pZSLChannel =
+            (QCameraPicChannel *)m_channels[QCAMERA_CH_TYPE_ZSL];
+        if (NULL != pZSLChannel) {
+
+            rc = configureOnlineRotation(*pZSLChannel);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: online rotation failed", __func__);
+                return rc;
+            }
+
+            // start postprocessor
+            DefferWorkArgs args;
+            memset(&args, 0, sizeof(DefferWorkArgs));
+
+            args.pprocArgs = pZSLChannel;
+            mReprocJob = queueDefferedWork(CMD_DEFF_PPROC_START,
+                    args);
+
+            if (mParameters.isUbiFocusEnabled() ||
+                    mParameters.isUbiRefocus() ||
+                    mParameters.isOptiZoomEnabled() ||
+                    mParameters.isHDREnabled() ||
+                    mParameters.isChromaFlashEnabled() ||
+                    mParameters.isAEBracketEnabled() ||
+                    mParameters.isStillMoreEnabled()) {
+                rc = startAdvancedCapture(pZSLChannel);
+                if (rc != NO_ERROR) {
+                    ALOGE("%s: cannot start zsl advanced capture", __func__);
+                    return rc;
+                }
+            }
+            if (mLongshotEnabled && mPrepSnapRun) {
+                mCameraHandle->ops->start_zsl_snapshot(
+                        mCameraHandle->camera_handle,
+                        pZSLChannel->getMyHandle());
+            }
+            rc = pZSLChannel->takePicture(numSnapshots, numRetroSnapshots);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: cannot take ZSL picture, stop pproc", __func__);
+                waitDefferedWork(mReprocJob);
+                m_postprocessor.stop();
+                return rc;
+            }
+        } else {
+            ALOGE("%s: ZSL channel is NULL", __func__);
+            return UNKNOWN_ERROR;
+        }
+    } else {
+
+        // start snapshot
+        if (mParameters.isJpegPictureFormat() ||
+            mParameters.isNV16PictureFormat() ||
+            mParameters.isNV21PictureFormat()) {
+
+            if (!isLongshotEnabled()) {
+
+                rc = addCaptureChannel();
+
+                // normal capture case
+                // need to stop preview channel
+                stopChannel(QCAMERA_CH_TYPE_PREVIEW);
+                delChannel(QCAMERA_CH_TYPE_PREVIEW);
+
+                if (NO_ERROR == rc) {
+                    rc = declareSnapshotStreams();
+                    if (NO_ERROR != rc) {
+                        delChannel(QCAMERA_CH_TYPE_CAPTURE);
+                        return rc;
+                    }
+                }
+
+                waitDefferedWork(mSnapshotJob);
+                waitDefferedWork(mMetadataJob);
+                waitDefferedWork(mRawdataJob);
+
+                {
+                    DefferWorkArgs args;
+                    DefferAllocBuffArgs allocArgs;
+
+                    memset(&args, 0, sizeof(DefferWorkArgs));
+                    memset(&allocArgs, 0, sizeof(DefferAllocBuffArgs));
+
+                    allocArgs.ch = m_channels[QCAMERA_CH_TYPE_CAPTURE];
+                    allocArgs.type = CAM_STREAM_TYPE_POSTVIEW;
+                    args.allocArgs = allocArgs;
+
+                    mPostviewJob = queueDefferedWork(CMD_DEFF_ALLOCATE_BUFF,
+                            args);
+
+                    if (mPostviewJob == -1) {
+                        rc = UNKNOWN_ERROR;
+                    }
+                }
+
+                waitDefferedWork(mPostviewJob);
+            } else {
+                // longshot capture case
+                // need to stop preview channel
+
+                stopChannel(QCAMERA_CH_TYPE_PREVIEW);
+                delChannel(QCAMERA_CH_TYPE_PREVIEW);
+
+                rc = declareSnapshotStreams();
+                if (NO_ERROR != rc) {
+                    return rc;
+                }
+
+                rc = addCaptureChannel();
+            }
+
+            if ((rc == NO_ERROR) &&
+                (NULL != m_channels[QCAMERA_CH_TYPE_CAPTURE])) {
+
+                // configure capture channel
+                rc = m_channels[QCAMERA_CH_TYPE_CAPTURE]->config();
+                if (rc != NO_ERROR) {
+                    ALOGE("%s: cannot configure capture channel", __func__);
+                    delChannel(QCAMERA_CH_TYPE_CAPTURE);
+                    return rc;
+                }
+
+                if (!mParameters.getofflineRAW()) {
+                    rc = configureOnlineRotation(
+                        *m_channels[QCAMERA_CH_TYPE_CAPTURE]);
+                    if (rc != NO_ERROR) {
+                        ALOGE("%s: online rotation failed", __func__);
+                        delChannel(QCAMERA_CH_TYPE_CAPTURE);
+                        return rc;
+                    }
+                }
+
+                DefferWorkArgs args;
+                memset(&args, 0, sizeof(DefferWorkArgs));
+
+                args.pprocArgs = m_channels[QCAMERA_CH_TYPE_CAPTURE];
+                mReprocJob = queueDefferedWork(CMD_DEFF_PPROC_START,
+                        args);
+
+                // start capture channel
+                rc =  m_channels[QCAMERA_CH_TYPE_CAPTURE]->start();
+                if (rc != NO_ERROR) {
+                    ALOGE("%s: cannot start capture channel", __func__);
+                    delChannel(QCAMERA_CH_TYPE_CAPTURE);
+                    return rc;
+                }
+
+                QCameraPicChannel *pCapChannel =
+                    (QCameraPicChannel *)m_channels[QCAMERA_CH_TYPE_CAPTURE];
+                if (NULL != pCapChannel) {
+                    if (mParameters.isUbiFocusEnabled() ||
+                            mParameters.isUbiRefocus() ||
+                            mParameters.isChromaFlashEnabled()) {
+                        rc = startAdvancedCapture(pCapChannel);
+                        if (rc != NO_ERROR) {
+                            ALOGE("%s: cannot start advanced capture", __func__);
+                            return rc;
+                        }
+                    }
+                }
+                if ( mLongshotEnabled ) {
+                    rc = longShot();
+                    if (NO_ERROR != rc) {
+                        delChannel(QCAMERA_CH_TYPE_CAPTURE);
+                        return rc;
+                    }
+                }
+            } else {
+                ALOGE("%s: cannot add capture channel", __func__);
+                delChannel(QCAMERA_CH_TYPE_CAPTURE);
+                return rc;
+            }
+        } else {
+
+            stopChannel(QCAMERA_CH_TYPE_PREVIEW);
+            delChannel(QCAMERA_CH_TYPE_PREVIEW);
+
+            rc = mParameters.updateRAW(gCamCaps[mCameraId]->raw_dim[0]);
+            if (NO_ERROR != rc) {
+                ALOGE("%s: Raw dimension update failed %d", __func__, rc);
+                return rc;
+            }
+
+            rc = declareSnapshotStreams();
+            if (NO_ERROR != rc) {
+                ALOGE("%s: RAW stream info configuration failed %d",
+                        __func__,
+                        rc);
+                return rc;
+            }
+
+            rc = addRawChannel();
+            if (rc == NO_ERROR) {
+                // start postprocessor
+                rc = m_postprocessor.start(m_channels[QCAMERA_CH_TYPE_RAW]);
+                if (rc != NO_ERROR) {
+                    ALOGE("%s: cannot start postprocessor", __func__);
+                    delChannel(QCAMERA_CH_TYPE_RAW);
+                    return rc;
+                }
+
+                rc = startChannel(QCAMERA_CH_TYPE_RAW);
+                if (rc != NO_ERROR) {
+                    ALOGE("%s: cannot start raw channel", __func__);
+                    m_postprocessor.stop();
+                    delChannel(QCAMERA_CH_TYPE_RAW);
+                    return rc;
+                }
+            } else {
+                ALOGE("%s: cannot add raw channel", __func__);
+                return rc;
+            }
+        }
+    }
+    CDBG_HIGH("%s: X", __func__);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : configureOnlineRotation
+ *
+ * DESCRIPTION: Configure backend with expected rotation for snapshot stream
+ *
+ * PARAMETERS :
+ *    @ch     : Channel containing a snapshot stream
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::configureOnlineRotation(QCameraChannel &ch)
+{
+    int rc = NO_ERROR;
+    uint32_t streamId = 0;
+    QCameraStream *pStream = NULL;
+
+    for (uint8_t i = 0; i < ch.getNumOfStreams(); i++) {
+        QCameraStream *stream = ch.getStreamByIndex(i);
+        if ((NULL != stream) &&
+                (CAM_STREAM_TYPE_SNAPSHOT == stream->getMyType())) {
+            pStream = stream;
+            break;
+        }
+    }
+
+    if (NULL == pStream) {
+        ALOGE("%s: No snapshot stream found!", __func__);
+        return BAD_VALUE;
+    }
+
+    streamId = pStream->getMyServerID();
+    // Update online rotation configuration
+    pthread_mutex_lock(&m_parm_lock);
+    rc = mParameters.addOnlineRotation(mParameters.getJpegRotation(), streamId,
+            mParameters.getDeviceRotation());
+    if (rc != NO_ERROR) {
+        ALOGE("%s: addOnlineRotation failed %d", __func__, rc);
+        pthread_mutex_unlock(&m_parm_lock);
+        return rc;
+    }
+    pthread_mutex_unlock(&m_parm_lock);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : declareSnapshotStreams
+ *
+ * DESCRIPTION: Configure backend with expected snapshot streams
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::declareSnapshotStreams()
+{
+    int rc = NO_ERROR;
+
+    // Update stream info configuration
+    pthread_mutex_lock(&m_parm_lock);
+    rc = mParameters.setStreamConfigure(true, mLongshotEnabled, false);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: setStreamConfigure failed %d", __func__, rc);
+        pthread_mutex_unlock(&m_parm_lock);
+        return rc;
+    }
+    pthread_mutex_unlock(&m_parm_lock);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : longShot
+ *
+ * DESCRIPTION: Queue one more ZSL frame
+ *              in the longshot pipe.
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::longShot()
+{
+    int32_t rc = NO_ERROR;
+    uint8_t numSnapshots = mParameters.getNumOfSnapshots();
+    QCameraPicChannel *pChannel = NULL;
+
+    if (mParameters.isZSLMode()) {
+        pChannel = (QCameraPicChannel *)m_channels[QCAMERA_CH_TYPE_ZSL];
+    } else {
+        pChannel = (QCameraPicChannel *)m_channels[QCAMERA_CH_TYPE_CAPTURE];
+    }
+
+    if (NULL != pChannel) {
+        rc = pChannel->takePicture(numSnapshots, 0);
+    } else {
+        ALOGE(" %s : Capture channel not initialized!", __func__);
+        rc = NO_INIT;
+        goto end;
+    }
+
+end:
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : stopCaptureChannel
+ *
+ * DESCRIPTION: Stops capture channel
+ *
+ * PARAMETERS :
+ *   @destroy : Set to true to stop and delete camera channel.
+ *              Set to false to only stop capture channel.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::stopCaptureChannel(bool destroy)
+{
+    int rc = NO_ERROR;
+    if (mParameters.isJpegPictureFormat() ||
+        mParameters.isNV16PictureFormat() ||
+        mParameters.isNV21PictureFormat()) {
+        rc = stopChannel(QCAMERA_CH_TYPE_CAPTURE);
+        if (destroy && (NO_ERROR == rc)) {
+            // Destroy camera channel but don't release context
+            rc = delChannel(QCAMERA_CH_TYPE_CAPTURE, false);
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : cancelPicture
+ *
+ * DESCRIPTION: cancel picture impl
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::cancelPicture()
+{
+    waitDefferedWork(mReprocJob);
+
+    //stop post processor
+    m_postprocessor.stop();
+
+    unconfigureAdvancedCapture();
+
+    mParameters.setDisplayFrame(TRUE);
+
+    if (!mLongshotEnabled) {
+        m_perfLock.lock_rel();
+    }
+
+    if (mParameters.isZSLMode()) {
+        QCameraPicChannel *pZSLChannel =
+            (QCameraPicChannel *)m_channels[QCAMERA_CH_TYPE_ZSL];
+        if (NULL != pZSLChannel) {
+            stopAdvancedCapture(pZSLChannel);
+            pZSLChannel->cancelPicture();
+        }
+    } else {
+
+        // normal capture case
+        if (mParameters.isJpegPictureFormat() ||
+            mParameters.isNV16PictureFormat() ||
+            mParameters.isNV21PictureFormat()) {
+            stopChannel(QCAMERA_CH_TYPE_CAPTURE);
+            delChannel(QCAMERA_CH_TYPE_CAPTURE);
+        } else {
+            stopChannel(QCAMERA_CH_TYPE_RAW);
+            delChannel(QCAMERA_CH_TYPE_RAW);
+        }
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : captureDone
+ *
+ * DESCRIPTION: Function called when the capture is completed before encoding
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::captureDone()
+{
+    qcamera_sm_internal_evt_payload_t *payload =
+       (qcamera_sm_internal_evt_payload_t *)
+       malloc(sizeof(qcamera_sm_internal_evt_payload_t));
+    if (NULL != payload) {
+        memset(payload, 0, sizeof(qcamera_sm_internal_evt_payload_t));
+        payload->evt_type = QCAMERA_INTERNAL_EVT_ZSL_CAPTURE_DONE;
+        int32_t rc = processEvt(QCAMERA_SM_EVT_EVT_INTERNAL, payload);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: processEvt ZSL capture done failed", __func__);
+            free(payload);
+            payload = NULL;
+        }
+    } else {
+        ALOGE("%s: No memory for ZSL capture done event", __func__);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : Live_Snapshot_thread
+ *
+ * DESCRIPTION: Separate thread for taking live snapshot during recording
+ *
+ * PARAMETERS : @data - pointer to QCamera2HardwareInterface class object
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void* Live_Snapshot_thread (void* data)
+{
+
+    QCamera2HardwareInterface *hw = reinterpret_cast<QCamera2HardwareInterface *>(data);
+    if (!hw) {
+        ALOGE("take_picture_thread: NULL camera device");
+        return (void *)BAD_VALUE;
+    }
+    hw->takeLiveSnapshot_internal();
+    return (void* )NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : Int_Pic_thread
+ *
+ * DESCRIPTION: Separate thread for taking snapshot triggered by camera backend
+ *
+ * PARAMETERS : @data - pointer to QCamera2HardwareInterface class object
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void* Int_Pic_thread (void* data)
+{
+    int rc = NO_ERROR;
+
+    QCamera2HardwareInterface *hw = reinterpret_cast<QCamera2HardwareInterface *>(data);
+
+    if (!hw) {
+        ALOGE("take_picture_thread: NULL camera device");
+        return (void *)BAD_VALUE;
+    }
+
+    bool JpegMemOpt = false;
+    char raw_format[PROPERTY_VALUE_MAX];
+
+    memset(raw_format, 0, sizeof(raw_format));
+
+    rc = hw->takeBackendPic_internal(&JpegMemOpt, &raw_format[0]);
+    if (rc == NO_ERROR) {
+        hw->checkIntPicPending(JpegMemOpt, &raw_format[0]);
+    } else {
+        //Snapshot attempt not successful, we need to do cleanup here
+        hw->clearIntPendingEvents();
+    }
+
+    return (void* )NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : takeLiveSnapshot
+ *
+ * DESCRIPTION: take live snapshot during recording
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::takeLiveSnapshot()
+{
+    int rc = NO_ERROR;
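+    // pthread_create() returns 0 on success, which maps to NO_ERROR; the
+    // actual snapshot work runs asynchronously in Live_Snapshot_thread.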
+    rc = pthread_create(&mLiveSnapshotThread, NULL, Live_Snapshot_thread, (void *) this);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : takePictureInternal
+ *
+ * DESCRIPTION: take snapshot triggered by backend
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::takePictureInternal()
+{
+    int rc = NO_ERROR;
+    rc = pthread_create(&mIntPicThread, NULL, Int_Pic_thread, (void *) this);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : checkIntPicPending
+ *
+ * DESCRIPTION: timed wait for jpeg completion event, then send the
+ *              completion event back to the backend
+ *
+ * PARAMETERS :
+ *   @JpegMemOpt  : JPEG memory optimization setting to restore
+ *   @raw_format  : original raw format to restore after a RAW snapshot
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::checkIntPicPending(bool JpegMemOpt, char *raw_format)
+{
+    bool bSendToBackend = true;
+    cam_int_evt_params_t params;
+    int rc = NO_ERROR;
+
+    struct timespec   ts;
+    struct timeval    tp;
+    gettimeofday(&tp, NULL);
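+    // Wait at most 5 seconds below for the JPEG/RAW completion signal before
+    // giving up and skipping the notification back to the backend.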
+    ts.tv_sec  = tp.tv_sec + 5;
+    ts.tv_nsec = tp.tv_usec * 1000;
+
+    if (true == m_bIntJpegEvtPending ||
+        (true == m_bIntRawEvtPending)) {
+        //Waiting in HAL for snapshot taken notification
+        pthread_mutex_lock(&m_int_lock);
+        rc = pthread_cond_timedwait(&m_int_cond, &m_int_lock, &ts);
+        if (ETIMEDOUT == rc || 0x0 == m_BackendFileName[0]) {
+            //Hit a timeout, or some spurious activity
+            bSendToBackend = false;
+        }
+
+        if (true == m_bIntJpegEvtPending) {
+            params.event_type = 0;
+        } else if (true == m_bIntRawEvtPending) {
+            params.event_type = 1;
+        }
+        pthread_mutex_unlock(&m_int_lock);
+
+        if (true == m_bIntJpegEvtPending) {
+            //Attempting to restart preview after taking JPEG snapshot
+            lockAPI();
+            rc = processAPI(QCAMERA_SM_EVT_SNAPSHOT_DONE, NULL);
+            unlockAPI();
+            m_postprocessor.setJpegMemOpt(JpegMemOpt);
+        } else if (true == m_bIntRawEvtPending) {
+            //Attempting to restart preview after taking RAW snapshot
+            stopChannel(QCAMERA_CH_TYPE_RAW);
+            delChannel(QCAMERA_CH_TYPE_RAW);
+            //restoring the old raw format
+            property_set("persist.camera.raw.format", raw_format);
+        }
+
+        if (true == bSendToBackend) {
+            //send event back to server with the file path
+            params.dim = m_postprocessor.m_dst_dim;
+            memcpy(&params.path[0], &m_BackendFileName[0], QCAMERA_MAX_FILEPATH_LENGTH);
+            memset(&m_BackendFileName[0], 0x0, QCAMERA_MAX_FILEPATH_LENGTH);
+            params.size = mBackendFileSize;
+            pthread_mutex_lock(&m_parm_lock);
+            rc = mParameters.setIntEvent(params);
+            pthread_mutex_unlock(&m_parm_lock);
+        }
+
+        clearIntPendingEvents();
+    }
+
+    return;
+}
+
+/*===========================================================================
+ * FUNCTION   : takeBackendPic_internal
+ *
+ * DESCRIPTION: take snapshot triggered by backend
+ *
+ * PARAMETERS :
+ *   @JpegMemOpt  : [OUT] existing JPEG memory optimization setting,
+ *                  saved so it can be restored after the capture
+ *   @raw_format  : [OUT] existing raw format property value, saved so
+ *                  it can be restored after the RAW snapshot
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::takeBackendPic_internal(bool *JpegMemOpt, char *raw_format)
+{
+    int rc = NO_ERROR;
+    qcamera_api_result_t apiResult;
+
+    lockAPI();
+    //Set rotation value from user settings as Jpeg rotation
+    //to configure back-end modules.
+    mParameters.setJpegRotation(mParameters.getRotation());
+
+    setRetroPicture(0);
+    /* Prepare snapshot in case LED needs to be flashed */
+    if (mFlashNeeded == 1 || mParameters.isChromaFlashEnabled()) {
+        // Start Preparing for normal Frames
+        CDBG_HIGH("%s: Start Prepare Snapshot", __func__);
+        /* Prepare snapshot in case LED needs to be flashed */
+        rc = processAPI(QCAMERA_SM_EVT_PREPARE_SNAPSHOT, NULL);
+        if (rc == NO_ERROR) {
+            waitAPIResult(QCAMERA_SM_EVT_PREPARE_SNAPSHOT, &apiResult);
+            rc = apiResult.status;
+            CDBG_HIGH("%s: Prep Snapshot done", __func__);
+        }
+        mPrepSnapRun = true;
+    }
+    unlockAPI();
+
+    if (true == m_bIntJpegEvtPending) {
+        //Attempting to take JPEG snapshot
+        *JpegMemOpt = m_postprocessor.getJpegMemOpt();
+        m_postprocessor.setJpegMemOpt(false);
+
+        /* capture */
+        lockAPI();
+        CDBG_HIGH("%s: Capturing internal snapshot", __func__);
+        rc = processAPI(QCAMERA_SM_EVT_TAKE_PICTURE, NULL);
+        if (rc == NO_ERROR) {
+            waitAPIResult(QCAMERA_SM_EVT_TAKE_PICTURE, &apiResult);
+            rc = apiResult.status;
+        }
+        unlockAPI();
+    } else if (true == m_bIntRawEvtPending) {
+        //Attempting to take RAW snapshot
+        (void)JpegMemOpt;
+        stopPreview();
+
+        //getting the existing raw format type
+        property_get("persist.camera.raw.format", raw_format, "16");
+        //setting it to a default know value for this task
+        property_set("persist.camera.raw.format", "18");
+
+        rc = addRawChannel();
+        if (rc == NO_ERROR) {
+            // start postprocessor
+            rc = m_postprocessor.start(m_channels[QCAMERA_CH_TYPE_RAW]);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: cannot start postprocessor", __func__);
+                delChannel(QCAMERA_CH_TYPE_RAW);
+                return rc;
+            }
+
+            rc = startChannel(QCAMERA_CH_TYPE_RAW);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: cannot start raw channel", __func__);
+                m_postprocessor.stop();
+                delChannel(QCAMERA_CH_TYPE_RAW);
+                return rc;
+            }
+        } else {
+            ALOGE("%s: cannot add raw channel", __func__);
+            return rc;
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : clearIntPendingEvents
+ *
+ * DESCRIPTION: clear internal pending events pertaining to backend
+ *                        snapshot requests
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::clearIntPendingEvents()
+{
+    int rc = NO_ERROR;
+
+    if (true == m_bIntRawEvtPending) {
+        preparePreview();
+        startPreview();
+    }
+    if (true == m_bIntJpegEvtPending) {
+        if (false == mParameters.isZSLMode()) {
+            lockAPI();
+            rc = processAPI(QCAMERA_SM_EVT_START_PREVIEW, NULL);
+            unlockAPI();
+        }
+    }
+
+    pthread_mutex_lock(&m_int_lock);
+    if (true == m_bIntJpegEvtPending) {
+        m_bIntJpegEvtPending = false;
+    } else if (true == m_bIntRawEvtPending) {
+        m_bIntRawEvtPending = false;
+    }
+    pthread_mutex_unlock(&m_int_lock);
+    return;
+}
+
+/*===========================================================================
+ * FUNCTION   : takeLiveSnapshot_internal
+ *
+ * DESCRIPTION: take live snapshot during recording
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::takeLiveSnapshot_internal()
+{
+    int rc = NO_ERROR;
+
+    QCameraChannel *pChannel = NULL;
+
+    //Set rotation value from user settings as Jpeg rotation
+    //to configure back-end modules.
+    mParameters.setJpegRotation(mParameters.getRotation());
+
+    // Configure advanced capture
+    if (mParameters.isUbiFocusEnabled() ||
+            mParameters.isUbiRefocus() ||
+            mParameters.isOptiZoomEnabled() ||
+            mParameters.isHDREnabled() ||
+            mParameters.isChromaFlashEnabled() ||
+            mParameters.isAEBracketEnabled() ||
+            mParameters.isStillMoreEnabled()) {
+        rc = configureAdvancedCapture();
+        if (rc != NO_ERROR) {
+            CDBG_HIGH("%s: configureAdvancedCapture unsuccessful", __func__);
+        }
+    }
+
+    // start post processor
+    rc = m_postprocessor.start(m_channels[QCAMERA_CH_TYPE_SNAPSHOT]);
+    if (NO_ERROR != rc) {
+        ALOGE("%s: Post-processor start failed %d", __func__, rc);
+        goto end;
+    }
+
+    pChannel = m_channels[QCAMERA_CH_TYPE_SNAPSHOT];
+    if (NULL == pChannel) {
+        ALOGE("%s: Snapshot channel not initialized", __func__);
+        rc = NO_INIT;
+        goto end;
+    }
+    //Disable reprocess for 4K liveshot case
+    if (!mParameters.is4k2kVideoResolution()) {
+        rc = configureOnlineRotation(*m_channels[QCAMERA_CH_TYPE_SNAPSHOT]);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: online rotation failed", __func__);
+            m_postprocessor.stop();
+            return rc;
+        }
+    }
+    // start snapshot channel
+    if ((rc == NO_ERROR) && (NULL != pChannel)) {
+        // Do not link metadata stream for 4K2k resolution
+        // as CPP processing would be done on snapshot stream and not
+        // reprocess stream
+        if (!mParameters.is4k2kVideoResolution()) {
+            // Find and try to link a metadata stream from preview channel
+            QCameraChannel *pMetaChannel = NULL;
+            QCameraStream *pMetaStream = NULL;
+
+            if (m_channels[QCAMERA_CH_TYPE_PREVIEW] != NULL) {
+                pMetaChannel = m_channels[QCAMERA_CH_TYPE_PREVIEW];
+                uint32_t streamNum = pMetaChannel->getNumOfStreams();
+                QCameraStream *pStream = NULL;
+                for (uint32_t i = 0 ; i < streamNum ; i++ ) {
+                    pStream = pMetaChannel->getStreamByIndex(i);
+                    if ((NULL != pStream) &&
+                            (CAM_STREAM_TYPE_METADATA == pStream->getMyType())) {
+                        pMetaStream = pStream;
+                        break;
+                    }
+                }
+            }
+
+            if ((NULL != pMetaChannel) && (NULL != pMetaStream)) {
+                rc = pChannel->linkStream(pMetaChannel, pMetaStream);
+                if (NO_ERROR != rc) {
+                    ALOGE("%s : Metadata stream link failed %d", __func__, rc);
+                }
+            }
+        }
+
+        rc = pChannel->start();
+    }
+
+end:
+    if (rc != NO_ERROR) {
+        rc = processAPI(QCAMERA_SM_EVT_CANCEL_PICTURE, NULL);
+        rc = sendEvtNotify(CAMERA_MSG_ERROR, UNKNOWN_ERROR, 0);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : cancelLiveSnapshot
+ *
+ * DESCRIPTION: cancel current live snapshot request
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::cancelLiveSnapshot()
+{
+    int rc = NO_ERROR;
+
+    unconfigureAdvancedCapture();
+    if (!mLongshotEnabled) {
+        m_perfLock.lock_rel();
+    }
+
+    if (mLiveSnapshotThread != 0) {
+        pthread_join(mLiveSnapshotThread,NULL);
+        mLiveSnapshotThread = 0;
+    }
+
+    //stop post processor
+    m_postprocessor.stop();
+
+    // stop snapshot channel
+    rc = stopChannel(QCAMERA_CH_TYPE_SNAPSHOT);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : getParameters
+ *
+ * DESCRIPTION: get parameters impl
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : a string containing parameter pairs
+ *==========================================================================*/
+char* QCamera2HardwareInterface::getParameters()
+{
+    char* strParams = NULL;
+    String8 str;
+
+    int cur_width, cur_height;
+    pthread_mutex_lock(&m_parm_lock);
+    //Need to take care of the scaled picture size
+    if(mParameters.m_reprocScaleParam.isScaleEnabled() &&
+        mParameters.m_reprocScaleParam.isUnderScaling()){
+        int scale_width, scale_height;
+
+        mParameters.m_reprocScaleParam.getPicSizeFromAPK(scale_width,scale_height);
+        mParameters.getPictureSize(&cur_width, &cur_height);
+
+        String8 pic_size;
+        char buffer[32];
+        snprintf(buffer, sizeof(buffer), "%dx%d", scale_width, scale_height);
+        pic_size.append(buffer);
+        mParameters.set(CameraParameters::KEY_PICTURE_SIZE, pic_size);
+    }
+
+    str = mParameters.flatten( );
+    strParams = (char *)malloc(sizeof(char)*(str.length()+1));
+    if(strParams != NULL){
+        memset(strParams, 0, sizeof(char)*(str.length()+1));
+        strlcpy(strParams, str.string(), str.length()+1);
+        strParams[str.length()] = 0;
+    }
+
+    if(mParameters.m_reprocScaleParam.isScaleEnabled() &&
+        mParameters.m_reprocScaleParam.isUnderScaling()){
+        //need to set the original picture size back
+        String8 pic_size;
+        char buffer[32];
+        snprintf(buffer, sizeof(buffer), "%dx%d", cur_width, cur_height);
+        pic_size.append(buffer);
+        mParameters.set(CameraParameters::KEY_PICTURE_SIZE, pic_size);
+    }
+    pthread_mutex_unlock(&m_parm_lock);
+    return strParams;
+}
+
+/*===========================================================================
+ * FUNCTION   : putParameters
+ *
+ * DESCRIPTION: put parameters string impl
+ *
+ * PARAMETERS :
+ *   @parms   : parameters string to be released
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::putParameters(char *parms)
+{
+    free(parms);
+    return NO_ERROR;
+}
+
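+/*
+ * Illustrative usage of the pair above (a sketch; "hw" stands for any
+ * QCamera2HardwareInterface instance): the string returned by getParameters()
+ * is heap-allocated and must be handed back through putParameters() once the
+ * caller is done with it, e.g.
+ *
+ *     char *params = hw->getParameters();
+ *     if (params != NULL) {
+ *         // ... inspect or log the flattened parameter string ...
+ *         hw->putParameters(params);
+ *     }
+ */
+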
+/*===========================================================================
+ * FUNCTION   : sendCommand
+ *
+ * DESCRIPTION: send command impl
+ *
+ * PARAMETERS :
+ *   @command : command to be executed
+ *   @arg1    : optional argument 1
+ *   @arg2    : optional argument 2
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::sendCommand(int32_t command,
+        int32_t &arg1, int32_t &/*arg2*/)
+{
+    int rc = NO_ERROR;
+
+    switch (command) {
+#ifndef VANILLA_HAL
+    case CAMERA_CMD_LONGSHOT_ON:
+        m_perfLock.lock_acq();
+        arg1 = 0;
+        // Longshot can only be enabled when image capture
+        // is not active.
+        if ( !m_stateMachine.isCaptureRunning() ) {
+            mLongshotEnabled = true;
+            mParameters.setLongshotEnable(mLongshotEnabled);
+
+            // Due to recent buffer count optimizations
+            // ZSL might run with considerably less buffers
+            // when not in longshot mode. Preview needs to
+            // restart in this case.
+            if (isZSLMode() && m_stateMachine.isPreviewRunning()) {
+                QCameraChannel *pChannel = NULL;
+                QCameraStream *pSnapStream = NULL;
+                pChannel = m_channels[QCAMERA_CH_TYPE_ZSL];
+                if (NULL != pChannel) {
+                    QCameraStream *pStream = NULL;
+                    for (uint32_t i = 0; i < pChannel->getNumOfStreams(); i++) {
+                        pStream = pChannel->getStreamByIndex(i);
+                        if (pStream != NULL) {
+                            if (pStream->isTypeOf(CAM_STREAM_TYPE_SNAPSHOT)) {
+                                pSnapStream = pStream;
+                                break;
+                            }
+                        }
+                    }
+                    if (NULL != pSnapStream) {
+                        uint8_t required = 0;
+                        required = getBufNumRequired(CAM_STREAM_TYPE_SNAPSHOT);
+                        if (pSnapStream->getBufferCount() < required) {
+                            arg1 = QCAMERA_SM_EVT_RESTART_PERVIEW;
+                        }
+                    }
+                }
+            }
+            //
+            mPrepSnapRun = false;
+        } else {
+            rc = NO_INIT;
+        }
+        break;
+    case CAMERA_CMD_LONGSHOT_OFF:
+        m_perfLock.lock_rel();
+        if ( mLongshotEnabled && m_stateMachine.isCaptureRunning() ) {
+            cancelPicture();
+            processEvt(QCAMERA_SM_EVT_SNAPSHOT_DONE, NULL);
+            QCameraChannel *pZSLChannel = m_channels[QCAMERA_CH_TYPE_ZSL];
+            if (isZSLMode() && (NULL != pZSLChannel) && mPrepSnapRun) {
+                mCameraHandle->ops->stop_zsl_snapshot(
+                        mCameraHandle->camera_handle,
+                        pZSLChannel->getMyHandle());
+            }
+        }
+        mPrepSnapRun = false;
+        mLongshotEnabled = false;
+        mParameters.setLongshotEnable(mLongshotEnabled);
+        break;
+    case CAMERA_CMD_HISTOGRAM_ON:
+    case CAMERA_CMD_HISTOGRAM_OFF:
+        rc = setHistogram(command == CAMERA_CMD_HISTOGRAM_ON? true : false);
+        break;
+#endif
+    case CAMERA_CMD_START_FACE_DETECTION:
+    case CAMERA_CMD_STOP_FACE_DETECTION:
+        mParameters.setFaceDetectionOption(command == CAMERA_CMD_START_FACE_DETECTION? true : false);
+        rc = setFaceDetection(command == CAMERA_CMD_START_FACE_DETECTION? true : false);
+        break;
+#ifndef VANILLA_HAL
+    case CAMERA_CMD_HISTOGRAM_SEND_DATA:
+#endif
+    default:
+        rc = NO_ERROR;
+        break;
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : registerFaceImage
+ *
+ * DESCRIPTION: register face image impl
+ *
+ * PARAMETERS :
+ *   @img_ptr : ptr to image buffer
+ *   @config  : ptr to config struct about input image info
+ *   @faceID  : [OUT] face ID to uniquely identify the registered face image
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::registerFaceImage(void *img_ptr,
+                                                 cam_pp_offline_src_config_t *config,
+                                                 int32_t &faceID)
+{
+    int rc = NO_ERROR;
+    faceID = -1;
+
+    if (img_ptr == NULL || config == NULL) {
+        ALOGE("%s: img_ptr or config is NULL", __func__);
+        return BAD_VALUE;
+    }
+
+    // allocate ion memory for source image
+    QCameraHeapMemory *imgBuf = new QCameraHeapMemory(QCAMERA_ION_USE_CACHE);
+    if (imgBuf == NULL) {
+        ALOGE("%s: Unable to new heap memory obj for image buf", __func__);
+        return NO_MEMORY;
+    }
+
+    rc = imgBuf->allocate(1, config->input_buf_planes.plane_info.frame_len, NON_SECURE);
+    if (rc < 0) {
+        ALOGE("%s: Unable to allocate heap memory for image buf", __func__);
+        delete imgBuf;
+        return NO_MEMORY;
+    }
+
+    void *pBufPtr = imgBuf->getPtr(0);
+    if (pBufPtr == NULL) {
+        ALOGE("%s: image buf is NULL", __func__);
+        imgBuf->deallocate();
+        delete imgBuf;
+        return NO_MEMORY;
+    }
+    memcpy(pBufPtr, img_ptr, config->input_buf_planes.plane_info.frame_len);
+
+    cam_pp_feature_config_t pp_feature;
+    memset(&pp_feature, 0, sizeof(cam_pp_feature_config_t));
+    pp_feature.feature_mask = CAM_QCOM_FEATURE_REGISTER_FACE;
+    QCameraReprocessChannel *pChannel =
+        addOfflineReprocChannel(*config, pp_feature, NULL, NULL);
+
+    if (pChannel == NULL) {
+        ALOGE("%s: fail to add offline reprocess channel", __func__);
+        imgBuf->deallocate();
+        delete imgBuf;
+        return UNKNOWN_ERROR;
+    }
+
+    rc = pChannel->start();
+    if (rc != NO_ERROR) {
+        ALOGE("%s: Cannot start reprocess channel", __func__);
+        imgBuf->deallocate();
+        delete imgBuf;
+        delete pChannel;
+        return rc;
+    }
+
+    ssize_t bufSize = imgBuf->getSize(0);
+    if (BAD_INDEX != bufSize) {
+        rc = pChannel->doReprocess(imgBuf->getFd(0), (size_t)bufSize, faceID);
+    } else {
+        ALOGE("Failed to retrieve buffer size (bad index)");
+        return UNKNOWN_ERROR;
+    }
+
+    // done with register face image, free imgbuf and delete reprocess channel
+    imgBuf->deallocate();
+    delete imgBuf;
+    imgBuf = NULL;
+    pChannel->stop();
+    delete pChannel;
+    pChannel = NULL;
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : release
+ *
+ * DESCRIPTION: release camera resource impl
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::release()
+{
+    // stop and delete all channels
+    for (int i = 0; i <QCAMERA_CH_TYPE_MAX ; i++) {
+        if (m_channels[i] != NULL) {
+            stopChannel((qcamera_ch_type_enum_t)i);
+            delChannel((qcamera_ch_type_enum_t)i);
+        }
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : dump
+ *
+ * DESCRIPTION: camera status dump impl
+ *
+ * PARAMETERS :
+ *   @fd      : fd for the buffer to be dumped with camera status
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::dump(int fd)
+{
+    dprintf(fd, "\n Camera HAL information Begin \n");
+    dprintf(fd, "Camera ID: %d \n", mCameraId);
+    dprintf(fd, "StoreMetaDataInFrame: %d \n", mStoreMetaDataInFrame);
+    dprintf(fd, "\n Configuration: %s", mParameters.dump().string());
+    dprintf(fd, "\n State Information: %s", m_stateMachine.dump().string());
+    dprintf(fd, "\n Camera HAL information End \n");
+
+    /* send UPDATE_DEBUG_LEVEL to the backend so that they can read the
+       debug level property */
+    mParameters.updateDebugLevel();
+    return NO_ERROR;
+}
+
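+/*
+ * Illustrative note: the dump() above is usually reached through the camera
+ * service's dump path (for example via `adb shell dumpsys media.camera`);
+ * the exact entry point depends on the surrounding framework rather than on
+ * this file.
+ */
+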
+/*===========================================================================
+ * FUNCTION   : processAPI
+ *
+ * DESCRIPTION: process API calls from upper layer
+ *
+ * PARAMETERS :
+ *   @api         : API to be processed
+ *   @api_payload : ptr to API payload if any
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::processAPI(qcamera_sm_evt_enum_t api, void *api_payload)
+{
+    int ret = DEAD_OBJECT;
+
+    if (m_smThreadActive) {
+        ret = m_stateMachine.procAPI(api, api_payload);
+    }
+
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : processEvt
+ *
+ * DESCRIPTION: process Evt from backend via mm-camera-interface
+ *
+ * PARAMETERS :
+ *   @evt         : event type to be processed
+ *   @evt_payload : ptr to event payload if any
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::processEvt(qcamera_sm_evt_enum_t evt, void *evt_payload)
+{
+    return m_stateMachine.procEvt(evt, evt_payload);
+}
+
+/*===========================================================================
+ * FUNCTION   : processSyncEvt
+ *
+ * DESCRIPTION: process synchronous Evt from backend
+ *
+ * PARAMETERS :
+ *   @evt         : event type to be processed
+ *   @evt_payload : ptr to event payload if any
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::processSyncEvt(qcamera_sm_evt_enum_t evt, void *evt_payload)
+{
+    int rc = NO_ERROR;
+
+    pthread_mutex_lock(&m_evtLock);
+    rc =  processEvt(evt, evt_payload);
+    if (rc == NO_ERROR) {
+        memset(&m_evtResult, 0, sizeof(qcamera_api_result_t));
+        while (m_evtResult.request_api != evt) {
+            pthread_cond_wait(&m_evtCond, &m_evtLock);
+        }
+        rc =  m_evtResult.status;
+    }
+    pthread_mutex_unlock(&m_evtLock);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : camEvtHandle
+ *
+ * DESCRIPTION: Function registered to mm-camera-interface to handle backend events
+ *
+ * PARAMETERS :
+ *   @camera_handle : camera handle
+ *   @evt           : ptr to event
+ *   @user_data     : user data ptr
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::camEvtHandle(uint32_t /*camera_handle*/,
+                                          mm_camera_event_t *evt,
+                                          void *user_data)
+{
+    QCamera2HardwareInterface *obj = (QCamera2HardwareInterface *)user_data;
+    if (obj && evt) {
+        mm_camera_event_t *payload =
+            (mm_camera_event_t *)malloc(sizeof(mm_camera_event_t));
+        if (NULL != payload) {
+            *payload = *evt;
+            //peek into the event; if this is an eztune event from the server,
+            //we don't need to post it to the state machine queues, we should
+            //directly spawn a thread and get the job done (jpeg or raw snapshot)
+            switch (payload->server_event_type) {
+                case CAM_EVENT_TYPE_INT_TAKE_JPEG:
+                    //Received JPEG trigger from eztune
+                    if (false == obj->m_bIntJpegEvtPending) {
+                        pthread_mutex_lock(&obj->m_int_lock);
+                        obj->m_bIntJpegEvtPending = true;
+                        pthread_mutex_unlock(&obj->m_int_lock);
+                        obj->takePictureInternal();
+                    }
+                    free(payload);
+                    break;
+                case CAM_EVENT_TYPE_INT_TAKE_RAW:
+                    //Received RAW trigger from eztune
+                    if (false == obj->m_bIntRawEvtPending) {
+                        pthread_mutex_lock(&obj->m_int_lock);
+                        obj->m_bIntRawEvtPending = true;
+                        pthread_mutex_unlock(&obj->m_int_lock);
+                        obj->takePictureInternal();
+                    }
+                    free(payload);
+                    break;
+                case CAM_EVENT_TYPE_DAEMON_DIED:
+                    {
+                        Mutex::Autolock l(obj->mDeffLock);
+                        obj->mDeffCond.broadcast();
+                        CDBG_HIGH("%s: broadcast mDeffCond signal\n", __func__);
+                    }
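+                    // intentional fall-through: the DAEMON_DIED event is also
+                    // forwarded to the state machine via the default case below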
+                default:
+                    obj->processEvt(QCAMERA_SM_EVT_EVT_NOTIFY, payload);
+                    break;
+            }
+        }
+    } else {
+        ALOGE("%s: NULL user_data", __func__);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : jpegEvtHandle
+ *
+ * DESCRIPTION: Function registered to mm-jpeg-interface to handle jpeg events
+ *
+ * PARAMETERS :
+ *   @status    : status of jpeg job
+ *   @client_hdl: jpeg client handle
+ *   @jobId     : jpeg job Id
+ *   @p_output  : ptr to jpeg output result struct
+ *   @userdata  : user data ptr
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::jpegEvtHandle(jpeg_job_status_t status,
+                                              uint32_t /*client_hdl*/,
+                                              uint32_t jobId,
+                                              mm_jpeg_output_t *p_output,
+                                              void *userdata)
+{
+    QCamera2HardwareInterface *obj = (QCamera2HardwareInterface *)userdata;
+    if (obj) {
+        qcamera_jpeg_evt_payload_t *payload =
+            (qcamera_jpeg_evt_payload_t *)malloc(sizeof(qcamera_jpeg_evt_payload_t));
+        if (NULL != payload) {
+            memset(payload, 0, sizeof(qcamera_jpeg_evt_payload_t));
+            payload->status = status;
+            payload->jobId = jobId;
+            if (p_output != NULL) {
+                payload->out_data = *p_output;
+            }
+            obj->processEvt(QCAMERA_SM_EVT_JPEG_EVT_NOTIFY, payload);
+        }
+    } else {
+        ALOGE("%s: NULL user_data", __func__);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : thermalEvtHandle
+ *
+ * DESCRIPTION: routine to handle thermal event notification
+ *
+ * PARAMETERS :
+ *   @level      : thermal level
+ *   @userdata   : userdata passed in during registration
+ *   @data       : opaque data from thermal client
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::thermalEvtHandle(
+        qcamera_thermal_level_enum_t *level, void *userdata, void *data)
+{
+    if (!mCameraOpened) {
+        CDBG_HIGH("%s: Camera is not opened, no need to handle thermal evt", __func__);
+        return NO_ERROR;
+    }
+
+    // Make sure thermal events are logged
+    CDBG_HIGH("%s: level = %d, userdata = %p, data = %p",
+        __func__, *level, userdata, data);
+    //We don't need to lockAPI or waitAPI here. QCAMERA_SM_EVT_THERMAL_NOTIFY
+    // becomes an async call. This also means we can only pass the payload
+    // by value, not by address.
+    return processAPI(QCAMERA_SM_EVT_THERMAL_NOTIFY, (void *)level);
+}
+
+/*===========================================================================
+ * FUNCTION   : sendEvtNotify
+ *
+ * DESCRIPTION: send event notify to notify thread
+ *
+ * PARAMETERS :
+ *   @msg_type: msg type to be sent
+ *   @ext1    : optional extension1
+ *   @ext2    : optional extension2
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::sendEvtNotify(int32_t msg_type,
+                                                 int32_t ext1,
+                                                 int32_t ext2)
+{
+    qcamera_callback_argm_t cbArg;
+    memset(&cbArg, 0, sizeof(qcamera_callback_argm_t));
+    cbArg.cb_type = QCAMERA_NOTIFY_CALLBACK;
+    cbArg.msg_type = msg_type;
+    cbArg.ext1 = ext1;
+    cbArg.ext2 = ext2;
+    return m_cbNotifier.notifyCallback(cbArg);
+}
+
+/*===========================================================================
+ * FUNCTION   : processAEInfo
+ *
+ * DESCRIPTION: process AE updates
+ *
+ * PARAMETERS :
+ *   @ae_params: current AE parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::processAEInfo(cam_3a_params_t &ae_params)
+{
+    pthread_mutex_lock(&m_parm_lock);
+    mParameters.updateAEInfo(ae_params);
+    pthread_mutex_unlock(&m_parm_lock);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : processFocusPositionInfo
+ *
+ * DESCRIPTION: process AF updates
+ *
+ * PARAMETERS :
+ *   @cur_pos_info: current lens position
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::processFocusPositionInfo(cam_focus_pos_info_t &cur_pos_info)
+{
+    pthread_mutex_lock(&m_parm_lock);
+    mParameters.updateCurrentFocusPosition(cur_pos_info);
+    pthread_mutex_unlock(&m_parm_lock);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : processAutoFocusEvent
+ *
+ * DESCRIPTION: process auto focus event
+ *
+ * PARAMETERS :
+ *   @focus_data: struct containing auto focus result info
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::processAutoFocusEvent(cam_auto_focus_data_t &focus_data)
+{
+    int32_t ret = NO_ERROR;
+    CDBG_HIGH("%s: E",__func__);
+
+    m_currentFocusState = focus_data.focus_state;
+
+    cam_focus_mode_type focusMode = mParameters.getFocusMode();
+    switch (focusMode) {
+    case CAM_FOCUS_MODE_AUTO:
+    case CAM_FOCUS_MODE_MACRO:
+        if (getCancelAutoFocus()) {
+            // auto focus has canceled, just ignore it
+            break;
+        }
+        // If the HAL focus mode is AUTO and AF focus mode is INFINITY, send event to app
+        if ((focusMode == CAM_FOCUS_MODE_AUTO) &&
+                (focus_data.focus_mode == CAM_FOCUS_MODE_INFINITY) &&
+                (focus_data.focus_state == CAM_AF_INACTIVE)) {
+            ret = sendEvtNotify(CAMERA_MSG_FOCUS, true, 0);
+            break;
+        }
+        if (focus_data.focus_state == CAM_AF_SCANNING ||
+            focus_data.focus_state == CAM_AF_INACTIVE) {
+            // in the middle of focusing, just ignore it
+            break;
+        }
+        // update focus distance
+        mParameters.updateFocusDistances(&focus_data.focus_dist);
+
+        if ((CAM_AF_FOCUSED == focus_data.focus_state) &&
+                mParameters.isZSLMode()) {
+            QCameraPicChannel *pZSLChannel =
+                    (QCameraPicChannel *)m_channels[QCAMERA_CH_TYPE_ZSL];
+            if (NULL != pZSLChannel) {
+                //flush the zsl-buffer
+                uint32_t flush_frame_idx = focus_data.focused_frame_idx;
+                CDBG("%s, flush the zsl-buffer before frame = %u.", __func__, flush_frame_idx);
+                pZSLChannel->flushSuperbuffer(flush_frame_idx);
+            }
+        }
+
+        ret = sendEvtNotify(CAMERA_MSG_FOCUS,
+                            (focus_data.focus_state == CAM_AF_FOCUSED)? true : false,
+                            0);
+        break;
+    case CAM_FOCUS_MODE_CONTINOUS_VIDEO:
+    case CAM_FOCUS_MODE_CONTINOUS_PICTURE:
+
+        // If the HAL focus mode is AUTO and AF focus mode is INFINITY, send event to app
+        if ((focusMode == CAM_FOCUS_MODE_CONTINOUS_PICTURE) &&
+                (focus_data.focus_mode == CAM_FOCUS_MODE_INFINITY) &&
+                (focus_data.focus_state == CAM_AF_INACTIVE)) {
+            ret = sendEvtNotify(CAMERA_MSG_FOCUS, false, 0);
+            break;
+        }
+
+        if (focus_data.focus_state == CAM_AF_FOCUSED ||
+            focus_data.focus_state == CAM_AF_NOT_FOCUSED) {
+            // update focus distance
+            mParameters.updateFocusDistances(&focus_data.focus_dist);
+
+            if ((focusMode == CAM_FOCUS_MODE_CONTINOUS_PICTURE) &&
+                    (CAM_AF_FOCUSED == focus_data.focus_state) &&
+                    mParameters.isZSLMode()) {
+                QCameraPicChannel *pZSLChannel =
+                        (QCameraPicChannel *)m_channels[QCAMERA_CH_TYPE_ZSL];
+                if (NULL != pZSLChannel) {
+                    //flush the zsl-buffer
+                    uint32_t flush_frame_idx = focus_data.focused_frame_idx;
+                    CDBG("%s, flush the zsl-buffer before frame = %u.", __func__, flush_frame_idx);
+                    pZSLChannel->flushSuperbuffer(flush_frame_idx);
+                }
+            }
+
+            ret = sendEvtNotify(CAMERA_MSG_FOCUS,
+                  (focus_data.focus_state == CAM_AF_FOCUSED)? true : false,
+                  0);
+        }
+        ret = sendEvtNotify(CAMERA_MSG_FOCUS_MOVE,
+                (focus_data.focus_state == CAM_AF_SCANNING)? true : false,
+                0);
+        break;
+    case CAM_FOCUS_MODE_INFINITY:
+    case CAM_FOCUS_MODE_FIXED:
+    case CAM_FOCUS_MODE_EDOF:
+    default:
+        CDBG_HIGH("%s: no ops for autofocus event in focusmode %d", __func__, focusMode);
+        break;
+    }
+
+    CDBG_HIGH("%s: X",__func__);
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : processZoomEvent
+ *
+ * DESCRIPTION: process zoom event
+ *
+ * PARAMETERS :
+ *   @crop_info : crop info as a result of zoom operation
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::processZoomEvent(cam_crop_data_t &crop_info)
+{
+    int32_t ret = NO_ERROR;
+
+    for (int i = 0; i < QCAMERA_CH_TYPE_MAX; i++) {
+        if (m_channels[i] != NULL) {
+            ret = m_channels[i]->processZoomDone(mPreviewWindow, crop_info);
+        }
+    }
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : processZSLCaptureDone
+ *
+ * DESCRIPTION: process ZSL capture done events
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::processZSLCaptureDone()
+{
+    int rc = NO_ERROR;
+
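+    // Advanced capture modes (e.g. HDR or ubifocus bursts) feed a burst of ZSL
+    // frames; once the expected number of input frames has been received, the
+    // advanced capture configuration is torn back down.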
+    pthread_mutex_lock(&m_parm_lock);
+    if (++mInputCount >= mParameters.getBurstCountForAdvancedCapture()) {
+        rc = unconfigureAdvancedCapture();
+    }
+    pthread_mutex_unlock(&m_parm_lock);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : processRetroAECUnlock
+ *
+ * DESCRIPTION: process retro burst AEC unlock events
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::processRetroAECUnlock()
+{
+    int rc = NO_ERROR;
+
+    CDBG_HIGH("%s : [ZSL Retro] LED assisted AF Release AEC Lock", __func__);
+    pthread_mutex_lock(&m_parm_lock);
+    rc = mParameters.setAecLock("false");
+    if (NO_ERROR != rc) {
+        ALOGE("%s: Error setting AEC lock", __func__);
+        pthread_mutex_unlock(&m_parm_lock);
+        return rc;
+    }
+
+    rc = mParameters.commitParameters();
+    if (NO_ERROR != rc) {
+        ALOGE("%s: Error during camera parameter commit", __func__);
+    } else {
+        m_bLedAfAecLock = FALSE;
+    }
+
+    pthread_mutex_unlock(&m_parm_lock);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : processHDRData
+ *
+ * DESCRIPTION: process HDR scene events
+ *
+ * PARAMETERS :
+ *   @hdr_scene : HDR scene event data
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::processHDRData(cam_asd_hdr_scene_data_t hdr_scene)
+{
+    int rc = NO_ERROR;
+
+#ifndef VANILLA_HAL
+    if (hdr_scene.is_hdr_scene &&
+      (hdr_scene.hdr_confidence > HDR_CONFIDENCE_THRESHOLD) &&
+      mParameters.isAutoHDREnabled()) {
+        m_HDRSceneEnabled = true;
+    } else {
+        m_HDRSceneEnabled = false;
+    }
+    pthread_mutex_lock(&m_parm_lock);
+    mParameters.setHDRSceneEnable(m_HDRSceneEnabled);
+    pthread_mutex_unlock(&m_parm_lock);
+
+    if ( msgTypeEnabled(CAMERA_MSG_META_DATA) ) {
+
+        size_t data_len = sizeof(int);
+        size_t buffer_len = 1 *sizeof(int)       //meta type
+                          + 1 *sizeof(int)       //data len
+                          + 1 *sizeof(int);      //data
+        camera_memory_t *hdrBuffer = mGetMemory(-1,
+                                                 buffer_len,
+                                                 1,
+                                                 mCallbackCookie);
+        if ( NULL == hdrBuffer ) {
+            ALOGE("%s: Not enough memory for auto HDR data",
+                  __func__);
+            return NO_MEMORY;
+        }
+
+        int *pHDRData = (int *)hdrBuffer->data;
+        if (pHDRData == NULL) {
+            ALOGE("%s: memory data ptr is NULL", __func__);
+            hdrBuffer->release(hdrBuffer);
+            return UNKNOWN_ERROR;
+        }
+
+        pHDRData[0] = CAMERA_META_DATA_HDR;
+        pHDRData[1] = (int)data_len;
+        pHDRData[2] = m_HDRSceneEnabled;
+
+        qcamera_callback_argm_t cbArg;
+        memset(&cbArg, 0, sizeof(qcamera_callback_argm_t));
+        cbArg.cb_type = QCAMERA_DATA_CALLBACK;
+        cbArg.msg_type = CAMERA_MSG_META_DATA;
+        cbArg.data = hdrBuffer;
+        cbArg.user_data = hdrBuffer;
+        cbArg.cookie = this;
+        cbArg.release_cb = releaseCameraMemory;
+        rc = m_cbNotifier.notifyCallback(cbArg);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: fail sending auto HDR notification", __func__);
+            hdrBuffer->release(hdrBuffer);
+        }
+    }
+
+    CDBG_HIGH("%s : hdr_scene_data: processHDRData: %d %f",
+          __func__,
+          hdr_scene.is_hdr_scene,
+          hdr_scene.hdr_confidence);
+
+#endif
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : transAwbMetaToParams
+ *
+ * DESCRIPTION: translate awb params from metadata callback to QCameraParameters
+ *
+ * PARAMETERS :
+ *   @awb_params : awb params from metadata callback
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::transAwbMetaToParams(cam_awb_params_t &awb_params)
+{
+    pthread_mutex_lock(&m_parm_lock);
+    mParameters.updateAWBParams(awb_params);
+    pthread_mutex_unlock(&m_parm_lock);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : processPrepSnapshotDoneEvent
+ *
+ * DESCRIPTION: process prep snapshot done event
+ *
+ * PARAMETERS :
+ *   @prep_snapshot_state  : state of prepare snapshot done, i.e. whether
+ *                           future frames are needed for capture.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::processPrepSnapshotDoneEvent(
+                        cam_prep_snapshot_state_t prep_snapshot_state)
+{
+    int32_t ret = NO_ERROR;
+
+    if (m_channels[QCAMERA_CH_TYPE_ZSL] &&
+        prep_snapshot_state == NEED_FUTURE_FRAME) {
+        CDBG_HIGH("%s: already handled in mm-camera-intf, no ops here", __func__);
+        if (isRetroPicture()) {
+            mParameters.setAecLock("true");
+            mParameters.commitParameters();
+            m_bLedAfAecLock = TRUE;
+        }
+    }
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : processASDUpdate
+ *
+ * DESCRIPTION: process ASD update event
+ *
+ * PARAMETERS :
+ *   @scene: selected scene mode
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::processASDUpdate(cam_auto_scene_t scene)
+{
+    //set ASD parameter
+    mParameters.set(QCameraParameters::KEY_SELECTED_AUTO_SCENE, mParameters.getASDStateString(scene));
+
+    size_t data_len = sizeof(cam_auto_scene_t);
+    size_t buffer_len = 1 *sizeof(int)       //meta type
+                      + 1 *sizeof(int)       //data len
+                      + data_len;            //data
+    camera_memory_t *asdBuffer = mGetMemory(-1,
+                                             buffer_len,
+                                             1,
+                                             mCallbackCookie);
+    if ( NULL == asdBuffer ) {
+        ALOGE("%s: Not enough memory for ASD data", __func__);
+        return NO_MEMORY;
+    }
+
+    int *pASDData = (int *)asdBuffer->data;
+    if (pASDData == NULL) {
+        ALOGE("%s: memory data ptr is NULL", __func__);
+        asdBuffer->release(asdBuffer);
+        return UNKNOWN_ERROR;
+    }
+
+#ifndef VANILLA_HAL
+    pASDData[0] = CAMERA_META_DATA_ASD;
+    pASDData[1] = (int)data_len;
+    pASDData[2] = scene;
+
+    qcamera_callback_argm_t cbArg;
+    memset(&cbArg, 0, sizeof(qcamera_callback_argm_t));
+    cbArg.cb_type = QCAMERA_DATA_CALLBACK;
+    cbArg.msg_type = CAMERA_MSG_META_DATA;
+    cbArg.data = asdBuffer;
+    cbArg.user_data = asdBuffer;
+    cbArg.cookie = this;
+    cbArg.release_cb = releaseCameraMemory;
+    int32_t rc = m_cbNotifier.notifyCallback(cbArg);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: fail sending notification", __func__);
+        asdBuffer->release(asdBuffer);
+    }
+#endif
+    return NO_ERROR;
+
+}
+
+/*===========================================================================
+ * FUNCTION   : processJpegNotify
+ *
+ * DESCRIPTION: process jpeg event
+ *
+ * PARAMETERS :
+ *   @jpeg_evt: ptr to jpeg event payload
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::processJpegNotify(qcamera_jpeg_evt_payload_t *jpeg_evt)
+{
+    return m_postprocessor.processJpegEvt(jpeg_evt);
+}
+
+/*===========================================================================
+ * FUNCTION   : lockAPI
+ *
+ * DESCRIPTION: lock to process API
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::lockAPI()
+{
+    pthread_mutex_lock(&m_lock);
+}
+
+/*===========================================================================
+ * FUNCTION   : waitAPIResult
+ *
+ * DESCRIPTION: wait for an API result to come back. This is a blocking call; it
+ *              returns only when the expected API event type arrives
+ *
+ * PARAMETERS :
+ *   @api_evt   : API event type
+ *   @apiResult : ptr to hold the returned API result
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::waitAPIResult(qcamera_sm_evt_enum_t api_evt,
+        qcamera_api_result_t *apiResult)
+{
+    CDBG("%s: wait for API result of evt (%d)", __func__, api_evt);
+    int resultReceived = 0;
+    while  (!resultReceived) {
+        pthread_cond_wait(&m_cond, &m_lock);
+        if (m_apiResultList != NULL) {
+            api_result_list *apiResultList = m_apiResultList;
+            api_result_list *apiResultListPrevious = m_apiResultList;
+            while (apiResultList != NULL) {
+                if (apiResultList->result.request_api == api_evt) {
+                    resultReceived = 1;
+                    *apiResult = apiResultList->result;
+                    apiResultListPrevious->next = apiResultList->next;
+                    if (apiResultList == m_apiResultList) {
+                        m_apiResultList = apiResultList->next;
+                    }
+                    free(apiResultList);
+                    break;
+                }
+                else {
+                    apiResultListPrevious = apiResultList;
+                    apiResultList = apiResultList->next;
+                }
+            }
+        }
+    }
+    CDBG("%s: return (%d) from API result wait for evt (%d)",
+          __func__, apiResult->status, api_evt);
+}
+
+
+/*===========================================================================
+ * FUNCTION   : unlockAPI
+ *
+ * DESCRIPTION: API processing is done, unlock
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::unlockAPI()
+{
+    pthread_mutex_unlock(&m_lock);
+}
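+
+/* Illustrative API-call pattern (a sketch only; the actual dispatch of the
+ * event (evt) to the state machine is omitted here):
+ *
+ *     lockAPI();
+ *     // ...queue the API event (evt) to the state machine...
+ *     qcamera_api_result_t apiResult;
+ *     waitAPIResult(evt, &apiResult);   // blocks until signalAPIResult() posts it
+ *     unlockAPI();
+ */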
+
+/*===========================================================================
+ * FUNCTION   : signalAPIResult
+ *
+ * DESCRIPTION: signal condition variable that a certain API event type has arrived
+ *
+ * PARAMETERS :
+ *   @result  : API result
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::signalAPIResult(qcamera_api_result_t *result)
+{
+
+    pthread_mutex_lock(&m_lock);
+    api_result_list *apiResult = (api_result_list *)malloc(sizeof(api_result_list));
+    if (apiResult == NULL) {
+        ALOGE("%s: ERROR: malloc for api result failed", __func__);
+        ALOGE("%s: ERROR: api thread will wait forever for this lost result", __func__);
+        goto malloc_failed;
+    }
+    apiResult->result = *result;
+    apiResult->next = NULL;
+    if (m_apiResultList == NULL) m_apiResultList = apiResult;
+    else {
+        api_result_list *apiResultList = m_apiResultList;
+        while(apiResultList->next != NULL) apiResultList = apiResultList->next;
+        apiResultList->next = apiResult;
+    }
+malloc_failed:
+    pthread_cond_broadcast(&m_cond);
+    pthread_mutex_unlock(&m_lock);
+}
+
+/*===========================================================================
+ * FUNCTION   : signalEvtResult
+ *
+ * DESCRIPTION: signal condition variable that a certain event was processed
+ *
+ * PARAMETERS :
+ *   @result  : Event result
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::signalEvtResult(qcamera_api_result_t *result)
+{
+    pthread_mutex_lock(&m_evtLock);
+    m_evtResult = *result;
+    pthread_cond_signal(&m_evtCond);
+    pthread_mutex_unlock(&m_evtLock);
+}
+
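+/*===========================================================================
+ * FUNCTION   : prepareRawStream
+ *
+ * DESCRIPTION: find the maximum dimension among all active non-metadata
+ *              streams (including those of the given channel) and update
+ *              the RAW stream dimension accordingly
+ *
+ * PARAMETERS :
+ *   @curChannel : ptr to the channel whose RAW stream is being prepared
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/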
+int32_t QCamera2HardwareInterface::prepareRawStream(QCameraChannel *curChannel)
+{
+    int32_t rc = NO_ERROR;
+    cam_dimension_t str_dim,max_dim;
+    QCameraChannel *pChannel;
+
+    max_dim.width = 0;
+    max_dim.height = 0;
+
+    for (int j = 0; j < QCAMERA_CH_TYPE_MAX; j++) {
+        if (m_channels[j] != NULL) {
+            pChannel = m_channels[j];
+            for (uint8_t i = 0; i < pChannel->getNumOfStreams(); i++) {
+                QCameraStream *pStream = pChannel->getStreamByIndex(i);
+                if (pStream != NULL) {
+                    if (pStream->isTypeOf(CAM_STREAM_TYPE_METADATA)) {
+                        continue;
+                    }
+                    pStream->getFrameDimension(str_dim);
+                    if (str_dim.width > max_dim.width) {
+                        max_dim.width = str_dim.width;
+                    }
+                    if (str_dim.height > max_dim.height) {
+                        max_dim.height = str_dim.height;
+                    }
+                }
+            }
+        }
+    }
+
+    for (uint8_t i = 0; i < curChannel->getNumOfStreams(); i++) {
+        QCameraStream *pStream = curChannel->getStreamByIndex(i);
+        if (pStream != NULL) {
+            if (pStream->isTypeOf(CAM_STREAM_TYPE_METADATA)) {
+                continue;
+            }
+            pStream->getFrameDimension(str_dim);
+            if (str_dim.width > max_dim.width) {
+                max_dim.width = str_dim.width;
+            }
+            if (str_dim.height > max_dim.height) {
+                max_dim.height = str_dim.height;
+            }
+        }
+    }
+    rc = mParameters.updateRAW(max_dim);
+    return rc;
+}
+/*===========================================================================
+ * FUNCTION   : addStreamToChannel
+ *
+ * DESCRIPTION: add a stream into a channel
+ *
+ * PARAMETERS :
+ *   @pChannel   : ptr to channel obj
+ *   @streamType : type of stream to be added
+ *   @streamCB   : callback of stream
+ *   @userData   : user data ptr to callback
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::addStreamToChannel(QCameraChannel *pChannel,
+                                                      cam_stream_type_t streamType,
+                                                      stream_cb_routine streamCB,
+                                                      void *userData)
+{
+    int32_t rc = NO_ERROR;
+
+    if (streamType == CAM_STREAM_TYPE_RAW) {
+        prepareRawStream(pChannel);
+    }
+    QCameraHeapMemory *pStreamInfo = allocateStreamInfoBuf(streamType);
+    if (pStreamInfo == NULL) {
+        ALOGE("%s: no mem for stream info buf", __func__);
+        return NO_MEMORY;
+    }
+    uint8_t minStreamBufNum = getBufNumRequired(streamType);
+    bool bDynAllocBuf = false;
+    if (isZSLMode() && streamType == CAM_STREAM_TYPE_SNAPSHOT) {
+        bDynAllocBuf = true;
+    }
+
+    if ( ( streamType == CAM_STREAM_TYPE_SNAPSHOT ||
+            streamType == CAM_STREAM_TYPE_POSTVIEW ||
+            streamType == CAM_STREAM_TYPE_METADATA ||
+            streamType == CAM_STREAM_TYPE_RAW) &&
+            !isZSLMode() &&
+            !isLongshotEnabled() &&
+            !mParameters.getRecordingHintValue() &&
+            !mParameters.isSecureMode()) {
+        rc = pChannel->addStream(*this,
+                pStreamInfo,
+                NULL,
+                minStreamBufNum,
+                &gCamCaps[mCameraId]->padding_info,
+                streamCB, userData,
+                bDynAllocBuf,
+                true);
+
+        // Queue deferred buffer allocation for snapshot, metadata and raw streams
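+        // (the queued jobs are later completed via waitDefferedWork() before
+        // the corresponding buffers are consumed, e.g. in addRawChannel and
+        // preparePreview)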
+        if ( !rc ) {
+            DefferWorkArgs args;
+            DefferAllocBuffArgs allocArgs;
+
+            memset(&args, 0, sizeof(DefferWorkArgs));
+            memset(&allocArgs, 0, sizeof(DefferAllocBuffArgs));
+            allocArgs.type = streamType;
+            allocArgs.ch = pChannel;
+            args.allocArgs = allocArgs;
+
+            if (streamType == CAM_STREAM_TYPE_SNAPSHOT) {
+                mSnapshotJob = queueDefferedWork(CMD_DEFF_ALLOCATE_BUFF,
+                        args);
+
+                if ( mSnapshotJob == -1) {
+                    rc = UNKNOWN_ERROR;
+                }
+            } else if (streamType == CAM_STREAM_TYPE_METADATA) {
+                mMetadataJob = queueDefferedWork(CMD_DEFF_ALLOCATE_BUFF,
+                        args);
+
+                if ( mMetadataJob == -1) {
+                    rc = UNKNOWN_ERROR;
+                }
+            } else if (streamType == CAM_STREAM_TYPE_RAW) {
+                mRawdataJob = queueDefferedWork(CMD_DEFF_ALLOCATE_BUFF,
+                        args);
+
+                if ( mRawdataJob == -1) {
+                    rc = UNKNOWN_ERROR;
+                }
+            }
+        }
+    } else if (streamType == CAM_STREAM_TYPE_ANALYSIS) {
+        rc = pChannel->addStream(*this,
+                pStreamInfo,
+                NULL,
+                minStreamBufNum,
+                &gCamCaps[mCameraId]->analysis_padding_info,
+                streamCB, userData,
+                bDynAllocBuf,
+                false);
+    } else {
+        rc = pChannel->addStream(*this,
+                pStreamInfo,
+                NULL,
+                minStreamBufNum,
+                &gCamCaps[mCameraId]->padding_info,
+                streamCB, userData,
+                bDynAllocBuf,
+                false);
+    }
+
+    if (rc != NO_ERROR) {
+        ALOGE("%s: add stream type (%d) failed, ret = %d",
+              __func__, streamType, rc);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : addPreviewChannel
+ *
+ * DESCRIPTION: add a preview channel that contains a preview stream
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::addPreviewChannel()
+{
+    int32_t rc = NO_ERROR;
+    QCameraChannel *pChannel = NULL;
+
+    if (m_channels[QCAMERA_CH_TYPE_PREVIEW] != NULL) {
+        // if we had preview channel before, delete it first
+        delete m_channels[QCAMERA_CH_TYPE_PREVIEW];
+        m_channels[QCAMERA_CH_TYPE_PREVIEW] = NULL;
+    }
+
+    pChannel = new QCameraChannel(mCameraHandle->camera_handle,
+                                  mCameraHandle->ops);
+    if (NULL == pChannel) {
+        ALOGE("%s: no mem for preview channel", __func__);
+        return NO_MEMORY;
+    }
+
+    // preview only channel, don't need bundle attr and cb
+    rc = pChannel->init(NULL, NULL, NULL);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: init preview channel failed, ret = %d", __func__, rc);
+        return rc;
+    }
+
+    // meta data stream always coexists with preview if applicable
+    rc = addStreamToChannel(pChannel, CAM_STREAM_TYPE_METADATA,
+                            metadata_stream_cb_routine, this);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: add metadata stream failed, ret = %d", __func__, rc);
+        return rc;
+    }
+
+    if (mParameters.getRecordingHintValue() != true && !mParameters.isSecureMode()) {
+        rc = addStreamToChannel(pChannel, CAM_STREAM_TYPE_ANALYSIS,
+                NULL, this);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: add Analysis stream failed, ret = %d", __func__, rc);
+            return rc;
+        }
+    }
+
+    if (isRdiMode()) {
+        CDBG_HIGH("RDI_DEBUG %s[%d]: Add stream to channel", __func__, __LINE__);
+        rc = addStreamToChannel(pChannel, CAM_STREAM_TYPE_RAW,
+                                rdi_mode_stream_cb_routine, this);
+    } else {
+        if (isNoDisplayMode()) {
+            rc = addStreamToChannel(pChannel, CAM_STREAM_TYPE_PREVIEW,
+                                    nodisplay_preview_stream_cb_routine, this);
+        } else {
+            rc = addStreamToChannel(pChannel, CAM_STREAM_TYPE_PREVIEW,
+                                    preview_stream_cb_routine, this);
+        }
+    }
+
+    if (rc != NO_ERROR) {
+        ALOGE("%s: add preview stream failed, ret = %d", __func__, rc);
+        delete pChannel;
+        return rc;
+    }
+
+    m_channels[QCAMERA_CH_TYPE_PREVIEW] = pChannel;
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : addVideoChannel
+ *
+ * DESCRIPTION: add a video channel that contains a video stream
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::addVideoChannel()
+{
+    int32_t rc = NO_ERROR;
+    QCameraVideoChannel *pChannel = NULL;
+
+    if (m_channels[QCAMERA_CH_TYPE_VIDEO] != NULL) {
+        // if we had video channel before, delete it first
+        delete m_channels[QCAMERA_CH_TYPE_VIDEO];
+        m_channels[QCAMERA_CH_TYPE_VIDEO] = NULL;
+    }
+
+    pChannel = new QCameraVideoChannel(mCameraHandle->camera_handle,
+                                       mCameraHandle->ops);
+    if (NULL == pChannel) {
+        ALOGE("%s: no mem for video channel", __func__);
+        return NO_MEMORY;
+    }
+
+    // video only channel, don't need bundle attr and cb
+    rc = pChannel->init(NULL, NULL, NULL);
+    if (rc != 0) {
+        ALOGE("%s: init video channel failed, ret = %d", __func__, rc);
+        delete pChannel;
+        return rc;
+    }
+
+    rc = addStreamToChannel(pChannel, CAM_STREAM_TYPE_VIDEO,
+                            video_stream_cb_routine, this);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: add video stream failed, ret = %d", __func__, rc);
+        delete pChannel;
+        return rc;
+    }
+
+    m_channels[QCAMERA_CH_TYPE_VIDEO] = pChannel;
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : addSnapshotChannel
+ *
+ * DESCRIPTION: add a snapshot channel that contains a snapshot stream
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ * NOTE       : Add this channel for live snapshot usecase. Regular capture will
+ *              use addCaptureChannel.
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::addSnapshotChannel()
+{
+    int32_t rc = NO_ERROR;
+    QCameraChannel *pChannel = NULL;
+
+    if (m_channels[QCAMERA_CH_TYPE_SNAPSHOT] != NULL) {
+        // if we had snapshot channel before, delete it first
+        delete m_channels[QCAMERA_CH_TYPE_SNAPSHOT];
+        m_channels[QCAMERA_CH_TYPE_SNAPSHOT] = NULL;
+    }
+
+    pChannel = new QCameraChannel(mCameraHandle->camera_handle,
+                                  mCameraHandle->ops);
+    if (NULL == pChannel) {
+        ALOGE("%s: no mem for snapshot channel", __func__);
+        return NO_MEMORY;
+    }
+
+    mm_camera_channel_attr_t attr;
+    memset(&attr, 0, sizeof(mm_camera_channel_attr_t));
+    attr.notify_mode = MM_CAMERA_SUPER_BUF_NOTIFY_CONTINUOUS;
+    attr.look_back = mParameters.getZSLBackLookCount();
+    attr.post_frame_skip = mParameters.getZSLBurstInterval();
+    attr.water_mark = mParameters.getZSLQueueDepth();
+    attr.max_unmatched_frames = mParameters.getMaxUnmatchedFramesInQueue();
+    attr.priority = MM_CAMERA_SUPER_BUF_PRIORITY_LOW;
+    rc = pChannel->init(&attr, snapshot_channel_cb_routine, this);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: init snapshot channel failed, ret = %d", __func__, rc);
+        delete pChannel;
+        return rc;
+    }
+
+    rc = addStreamToChannel(pChannel, CAM_STREAM_TYPE_SNAPSHOT,
+            NULL, NULL);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: add snapshot stream failed, ret = %d", __func__, rc);
+        delete pChannel;
+        return rc;
+    }
+
+    m_channels[QCAMERA_CH_TYPE_SNAPSHOT] = pChannel;
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : addRawChannel
+ *
+ * DESCRIPTION: add a raw channel that contains a raw image stream
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::addRawChannel()
+{
+    int32_t rc = NO_ERROR;
+    QCameraChannel *pChannel = NULL;
+
+    if (m_channels[QCAMERA_CH_TYPE_RAW] != NULL) {
+        // if we had raw channel before, delete it first
+        delete m_channels[QCAMERA_CH_TYPE_RAW];
+        m_channels[QCAMERA_CH_TYPE_RAW] = NULL;
+    }
+
+    pChannel = new QCameraChannel(mCameraHandle->camera_handle,
+                                  mCameraHandle->ops);
+    if (NULL == pChannel) {
+        ALOGE("%s: no mem for raw channel", __func__);
+        return NO_MEMORY;
+    }
+
+    rc = pChannel->init(NULL, NULL, NULL);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: init raw channel failed, ret = %d", __func__, rc);
+        delete pChannel;
+        return rc;
+    }
+
+    // meta data stream always coexists with snapshot in regular RAW capture case
+    rc = addStreamToChannel(pChannel, CAM_STREAM_TYPE_METADATA,
+                            metadata_stream_cb_routine, this);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: add metadata stream failed, ret = %d", __func__, rc);
+        delete pChannel;
+        return rc;
+    }
+    waitDefferedWork(mMetadataJob);
+
+    rc = addStreamToChannel(pChannel, CAM_STREAM_TYPE_RAW,
+                            raw_stream_cb_routine, this);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: add raw stream failed, ret = %d", __func__, rc);
+        delete pChannel;
+        return rc;
+    }
+    waitDefferedWork(mRawdataJob);
+    m_channels[QCAMERA_CH_TYPE_RAW] = pChannel;
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : addZSLChannel
+ *
+ * DESCRIPTION: add a ZSL channel that contains a preview stream and
+ *              a snapshot stream
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::addZSLChannel()
+{
+    int32_t rc = NO_ERROR;
+    QCameraPicChannel *pChannel = NULL;
+    char value[PROPERTY_VALUE_MAX];
+    bool raw_yuv = false;
+
+    if (m_channels[QCAMERA_CH_TYPE_ZSL] != NULL) {
+        // if we had ZSL channel before, delete it first
+        delete m_channels[QCAMERA_CH_TYPE_ZSL];
+        m_channels[QCAMERA_CH_TYPE_ZSL] = NULL;
+    }
+
+    pChannel = new QCameraPicChannel(mCameraHandle->camera_handle,
+                                     mCameraHandle->ops);
+    if (NULL == pChannel) {
+        ALOGE("%s: no mem for ZSL channel", __func__);
+        return NO_MEMORY;
+    }
+
+    // ZSL channel, init with bundle attr and cb
+    mm_camera_channel_attr_t attr;
+    memset(&attr, 0, sizeof(mm_camera_channel_attr_t));
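+    // Scene selection needs every super-buffer, so it uses continuous notify;
+    // normal ZSL uses burst notify, with matching governed by the ZSL
+    // look-back, burst-interval and queue-depth parameters set below.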
+    if (mParameters.isSceneSelectionEnabled()) {
+        attr.notify_mode = MM_CAMERA_SUPER_BUF_NOTIFY_CONTINUOUS;
+    } else {
+        attr.notify_mode = MM_CAMERA_SUPER_BUF_NOTIFY_BURST;
+    }
+    attr.look_back = mParameters.getZSLBackLookCount();
+    attr.post_frame_skip = mParameters.getZSLBurstInterval();
+    attr.water_mark = mParameters.getZSLQueueDepth();
+    attr.max_unmatched_frames = mParameters.getMaxUnmatchedFramesInQueue();
+    rc = pChannel->init(&attr,
+                        zsl_channel_cb,
+                        this);
+    if (rc != 0) {
+        ALOGE("%s: init ZSL channel failed, ret = %d", __func__, rc);
+        delete pChannel;
+        return rc;
+    }
+
+    // meta data stream always coexists with preview if applicable
+    rc = addStreamToChannel(pChannel, CAM_STREAM_TYPE_METADATA,
+                            metadata_stream_cb_routine, this);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: add metadata stream failed, ret = %d", __func__, rc);
+        delete pChannel;
+        return rc;
+    }
+
+    if (isNoDisplayMode()) {
+        rc = addStreamToChannel(pChannel, CAM_STREAM_TYPE_PREVIEW,
+                                nodisplay_preview_stream_cb_routine, this);
+    } else {
+        rc = addStreamToChannel(pChannel, CAM_STREAM_TYPE_PREVIEW,
+                                preview_stream_cb_routine, this);
+    }
+    if (rc != NO_ERROR) {
+        ALOGE("%s: add preview stream failed, ret = %d", __func__, rc);
+        delete pChannel;
+        return rc;
+    }
+
+    rc = addStreamToChannel(pChannel, CAM_STREAM_TYPE_SNAPSHOT,
+                            NULL, this);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: add snapshot stream failed, ret = %d", __func__, rc);
+        delete pChannel;
+        return rc;
+    }
+
+    if (!mParameters.isSecureMode()) {
+        rc = addStreamToChannel(pChannel, CAM_STREAM_TYPE_ANALYSIS,
+                NULL, this);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: add Analysis stream failed, ret = %d", __func__, rc);
+            delete pChannel;
+            return rc;
+        }
+    }
+
+    property_get("persist.camera.raw_yuv", value, "0");
+    raw_yuv = atoi(value) > 0 ? true : false;
+    if ( raw_yuv ) {
+        rc = addStreamToChannel(pChannel,
+                                CAM_STREAM_TYPE_RAW,
+                                NULL,
+                                this);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: add raw stream failed, ret = %d", __func__, rc);
+            delete pChannel;
+            return rc;
+        }
+    }
+
+    m_channels[QCAMERA_CH_TYPE_ZSL] = pChannel;
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : addCaptureChannel
+ *
+ * DESCRIPTION: add a capture channel that contains a snapshot stream
+ *              and a postview stream
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ * NOTE       : Add this channel for regular capture usecase.
+ *              For Live snapshot usecase, use addSnapshotChannel.
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::addCaptureChannel()
+{
+    int32_t rc = NO_ERROR;
+    QCameraPicChannel *pChannel = NULL;
+    char value[PROPERTY_VALUE_MAX];
+    bool raw_yuv = false;
+
+    if (m_channels[QCAMERA_CH_TYPE_CAPTURE] != NULL) {
+        delete m_channels[QCAMERA_CH_TYPE_CAPTURE];
+        m_channels[QCAMERA_CH_TYPE_CAPTURE] = NULL;
+    }
+
+    pChannel = new QCameraPicChannel(mCameraHandle->camera_handle,
+                                  mCameraHandle->ops);
+    if (NULL == pChannel) {
+        ALOGE("%s: no mem for capture channel", __func__);
+        return NO_MEMORY;
+    }
+
+    // Capture channel: only snapshot and postview streams need to start together
+    mm_camera_channel_attr_t attr;
+    memset(&attr, 0, sizeof(mm_camera_channel_attr_t));
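+    // Longshot streams captures as a burst with ZSL-style look-back and
+    // watermark; a single capture only needs continuous notification.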
+    if ( mLongshotEnabled ) {
+        attr.notify_mode = MM_CAMERA_SUPER_BUF_NOTIFY_BURST;
+        attr.look_back = mParameters.getZSLBackLookCount();
+        attr.water_mark = mParameters.getZSLQueueDepth();
+    } else {
+        attr.notify_mode = MM_CAMERA_SUPER_BUF_NOTIFY_CONTINUOUS;
+    }
+    attr.max_unmatched_frames = mParameters.getMaxUnmatchedFramesInQueue();
+
+    rc = pChannel->init(&attr,
+                        capture_channel_cb_routine,
+                        this);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: init capture channel failed, ret = %d", __func__, rc);
+        return rc;
+    }
+
+    // meta data stream always coexists with snapshot in regular capture case
+    rc = addStreamToChannel(pChannel, CAM_STREAM_TYPE_METADATA,
+                            metadata_stream_cb_routine, this);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: add metadata stream failed, ret = %d", __func__, rc);
+        return rc;
+    }
+
+    if (!mLongshotEnabled) {
+        rc = addStreamToChannel(pChannel, CAM_STREAM_TYPE_POSTVIEW,
+                                NULL, this);
+
+        if (rc != NO_ERROR) {
+            ALOGE("%s: add postview stream failed, ret = %d", __func__, rc);
+            return rc;
+        }
+    } else {
+        rc = addStreamToChannel(pChannel, CAM_STREAM_TYPE_PREVIEW,
+                                preview_stream_cb_routine, this);
+
+        if (rc != NO_ERROR) {
+            ALOGE("%s: add preview stream failed, ret = %d", __func__, rc);
+            return rc;
+        }
+    }
+
+    if (!mParameters.getofflineRAW()) {
+        rc = addStreamToChannel(pChannel, CAM_STREAM_TYPE_SNAPSHOT,
+                NULL, this);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: add snapshot stream failed, ret = %d", __func__, rc);
+            return rc;
+        }
+    }
+    property_get("persist.camera.raw_yuv", value, "0");
+    raw_yuv = atoi(value) > 0 ? true : false;
+    if ( raw_yuv ) {
+        if (!mParameters.getofflineRAW()) {
+            rc = addStreamToChannel(pChannel,
+                    CAM_STREAM_TYPE_RAW,
+                    snapshot_raw_stream_cb_routine,
+                    this);
+        } else {
+            rc = addStreamToChannel(pChannel,
+                    CAM_STREAM_TYPE_RAW,
+                    NULL,
+                    this);
+        }
+        if (rc != NO_ERROR) {
+            ALOGE("%s: add raw stream failed, ret = %d", __func__, rc);
+            return rc;
+        }
+    }
+
+    m_channels[QCAMERA_CH_TYPE_CAPTURE] = pChannel;
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : addMetaDataChannel
+ *
+ * DESCRIPTION: add a meta data channel that contains a metadata stream
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::addMetaDataChannel()
+{
+    int32_t rc = NO_ERROR;
+    QCameraChannel *pChannel = NULL;
+
+    if (m_channels[QCAMERA_CH_TYPE_METADATA] != NULL) {
+        delete m_channels[QCAMERA_CH_TYPE_METADATA];
+        m_channels[QCAMERA_CH_TYPE_METADATA] = NULL;
+    }
+
+    pChannel = new QCameraChannel(mCameraHandle->camera_handle,
+                                  mCameraHandle->ops);
+    if (NULL == pChannel) {
+        ALOGE("%s: no mem for metadata channel", __func__);
+        return NO_MEMORY;
+    }
+
+    rc = pChannel->init(NULL,
+                        NULL,
+                        NULL);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: init metadata channel failed, ret = %d", __func__, rc);
+        delete pChannel;
+        return rc;
+    }
+
+    rc = addStreamToChannel(pChannel, CAM_STREAM_TYPE_METADATA,
+                            metadata_stream_cb_routine, this);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: add metadata stream failed, ret = %d", __func__, rc);
+        delete pChannel;
+        return rc;
+    }
+
+    m_channels[QCAMERA_CH_TYPE_METADATA] = pChannel;
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : addAnalysisChannel
+ *
+ * DESCRIPTION: add an analysis channel that contains an analysis stream
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::addAnalysisChannel()
+{
+    int32_t rc = NO_ERROR;
+    QCameraChannel *pChannel = NULL;
+
+    if (m_channels[QCAMERA_CH_TYPE_ANALYSIS] != NULL) {
+        delete m_channels[QCAMERA_CH_TYPE_ANALYSIS];
+        m_channels[QCAMERA_CH_TYPE_ANALYSIS] = NULL;
+    }
+
+    pChannel = new QCameraChannel(mCameraHandle->camera_handle,
+                                  mCameraHandle->ops);
+    if (NULL == pChannel) {
+        ALOGE("%s: no mem for analysis channel", __func__);
+        return NO_MEMORY;
+    }
+
+    rc = pChannel->init(NULL, NULL, this);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: init Analysis channel failed, ret = %d", __func__, rc);
+        delete pChannel;
+        return rc;
+    }
+
+    rc = addStreamToChannel(pChannel, CAM_STREAM_TYPE_ANALYSIS,
+                            NULL, this);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: add Analysis stream failed, ret = %d", __func__, rc);
+        delete pChannel;
+        return rc;
+    }
+
+    m_channels[QCAMERA_CH_TYPE_ANALYSIS] = pChannel;
+    return rc;
+}
+
+
+/*===========================================================================
+ * FUNCTION   : getPPConfig
+ *
+ * DESCRIPTION: get post processing configuration data
+ *
+ * PARAMETERS :
+ *   @pp_config : pp config structure pointer
+ *   @curCount  : current pp pass count
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::getPPConfig(cam_pp_feature_config_t &pp_config, int curCount)
+{
+    int32_t rc = NO_ERROR;
+
+    if ( curCount != mParameters.getReprocCount() ) {
+        ALOGW("%s : Multi pass enabled. Total Pass = %d, cur Pass = %d", __func__,
+                mParameters.getReprocCount(), curCount);
+    }
+
+    CDBG_HIGH("%s: Minimum pproc feature mask required = %x", __func__,
+            gCamCaps[mCameraId]->min_required_pp_mask);
+    uint32_t required_mask = gCamCaps[mCameraId]->min_required_pp_mask;
+    int32_t zoomLevel = 0;
+
+    switch(curCount) {
+        case 1:
+            //Configure feature mask for first pass of reprocessing
+            if (mParameters.isZSLMode() || required_mask & CAM_QCOM_FEATURE_PP_SUPERSET) {
+                if (gCamCaps[mCameraId]->min_required_pp_mask & CAM_QCOM_FEATURE_EFFECT) {
+                    pp_config.feature_mask |= CAM_QCOM_FEATURE_EFFECT;
+                    pp_config.effect = mParameters.getEffectValue();
+                }
+                if ((gCamCaps[mCameraId]->min_required_pp_mask & CAM_QCOM_FEATURE_SHARPNESS) &&
+                        !mParameters.isOptiZoomEnabled()) {
+                    pp_config.feature_mask |= CAM_QCOM_FEATURE_SHARPNESS;
+                    pp_config.sharpness = mParameters.getInt(QCameraParameters::KEY_QC_SHARPNESS);
+                }
+
+                if (gCamCaps[mCameraId]->min_required_pp_mask & CAM_QCOM_FEATURE_CROP) {
+                    pp_config.feature_mask |= CAM_QCOM_FEATURE_CROP;
+                }
+
+                if (mParameters.isWNREnabled()) {
+                    pp_config.feature_mask |= CAM_QCOM_FEATURE_DENOISE2D;
+                    pp_config.denoise2d.denoise_enable = 1;
+                    pp_config.denoise2d.process_plates =
+                            mParameters.getDenoiseProcessPlate(CAM_INTF_PARM_WAVELET_DENOISE);
+                }
+                if (required_mask & CAM_QCOM_FEATURE_ROTATION) {
+                    pp_config.feature_mask |= CAM_QCOM_FEATURE_ROTATION;
+                }
+                if (gCamCaps[mCameraId]->min_required_pp_mask & CAM_QCOM_FEATURE_SCALE) {
+                    pp_config.feature_mask |= CAM_QCOM_FEATURE_SCALE;
+                }
+            }
+
+            if (isCACEnabled()) {
+                pp_config.feature_mask |= CAM_QCOM_FEATURE_CAC;
+            }
+
+            if (needRotationReprocess()) {
+                pp_config.feature_mask |= CAM_QCOM_FEATURE_ROTATION;
+                uint32_t rotation = mParameters.getJpegRotation();
+                if (rotation == 0) {
+                    pp_config.rotation = ROTATE_0;
+                } else if (rotation == 90) {
+                    pp_config.rotation = ROTATE_90;
+                } else if (rotation == 180) {
+                    pp_config.rotation = ROTATE_180;
+                } else if (rotation == 270) {
+                    pp_config.rotation = ROTATE_270;
+                }
+            }
+
+            if (mParameters.isHDREnabled()){
+                pp_config.feature_mask |= CAM_QCOM_FEATURE_HDR;
+                pp_config.hdr_param.hdr_enable = 1;
+                pp_config.hdr_param.hdr_need_1x = mParameters.isHDR1xFrameEnabled();
+                pp_config.hdr_param.hdr_mode = CAM_HDR_MODE_MULTIFRAME;
+            } else {
+                pp_config.feature_mask &= ~CAM_QCOM_FEATURE_HDR;
+                pp_config.hdr_param.hdr_enable = 0;
+            }
+
+            if(needScaleReprocess()){
+                pp_config.feature_mask |= CAM_QCOM_FEATURE_SCALE;
+                mParameters.m_reprocScaleParam.getPicSizeFromAPK(
+                        pp_config.scale_param.output_width,
+                        pp_config.scale_param.output_height);
+            }
+
+            if(mParameters.isUbiFocusEnabled()) {
+                pp_config.feature_mask |= CAM_QCOM_FEATURE_UBIFOCUS;
+            } else {
+                pp_config.feature_mask &= ~CAM_QCOM_FEATURE_UBIFOCUS;
+            }
+
+            if(mParameters.isUbiRefocus()) {
+                pp_config.feature_mask |= CAM_QCOM_FEATURE_REFOCUS;
+                pp_config.misc_buf_param.misc_buffer_index = 0;
+            } else {
+                pp_config.feature_mask &= ~CAM_QCOM_FEATURE_REFOCUS;
+            }
+
+            if(mParameters.isChromaFlashEnabled()) {
+                pp_config.feature_mask |= CAM_QCOM_FEATURE_CHROMA_FLASH;
+                pp_config.flash_value = CAM_FLASH_ON;
+            } else {
+                pp_config.feature_mask &= ~CAM_QCOM_FEATURE_CHROMA_FLASH;
+            }
+
+            zoomLevel = mParameters.getParmZoomLevel();
+            if(mParameters.isOptiZoomEnabled() && (0 <= zoomLevel)) {
+                pp_config.feature_mask |= CAM_QCOM_FEATURE_OPTIZOOM;
+                pp_config.zoom_level = (uint8_t) zoomLevel;
+            } else {
+                pp_config.feature_mask &= ~CAM_QCOM_FEATURE_OPTIZOOM;
+            }
+
+            if (mParameters.getofflineRAW()) {
+                memset(&pp_config, 0, sizeof(cam_pp_feature_config_t));
+                pp_config.feature_mask |= CAM_QCOM_FEATURE_RAW_PROCESSING;
+            }
+
+            if (mParameters.isTruePortraitEnabled()) {
+                pp_config.feature_mask |= CAM_QCOM_FEATURE_TRUEPORTRAIT;
+                pp_config.misc_buf_param.misc_buffer_index = 0;
+            } else {
+                pp_config.feature_mask &= ~CAM_QCOM_FEATURE_TRUEPORTRAIT;
+            }
+
+            if(mParameters.isStillMoreEnabled()) {
+                pp_config.feature_mask |= CAM_QCOM_FEATURE_STILLMORE;
+            } else {
+                pp_config.feature_mask &= ~CAM_QCOM_FEATURE_STILLMORE;
+            }
+
+            if (curCount != mParameters.getReprocCount()) {
+                pp_config.feature_mask &= ~CAM_QCOM_FEATURE_PP_PASS_2;
+                pp_config.feature_mask &= ~CAM_QCOM_FEATURE_ROTATION;
+                pp_config.rotation = ROTATE_0;
+                pp_config.feature_mask |= CAM_QCOM_FEATURE_CROP;
+            } else {
+                pp_config.feature_mask |= CAM_QCOM_FEATURE_SCALE;
+            }
+            break;
+
+        case 2:
+            //Configure feature mask for second pass of reprocessing
+            pp_config.feature_mask |= CAM_QCOM_FEATURE_PP_PASS_2;
+            if (needRotationReprocess()) {
+                pp_config.feature_mask |= CAM_QCOM_FEATURE_ROTATION;
+                uint32_t rotation = mParameters.getJpegRotation();
+                if (rotation == 0) {
+                    pp_config.rotation = ROTATE_0;
+                } else if (rotation == 90) {
+                    pp_config.rotation = ROTATE_90;
+                } else if (rotation == 180) {
+                    pp_config.rotation = ROTATE_180;
+                } else if (rotation == 270) {
+                    pp_config.rotation = ROTATE_270;
+                }
+            }
+            break;
+
+    }
+    CDBG_HIGH("%s: pproc feature mask set = %x pass count = %d",
+            __func__, pp_config.feature_mask,curCount);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : addReprocChannel
+ *
+ * DESCRIPTION: add a reprocess channel that will do reprocess on frames
+ *              coming from input channel
+ *
+ * PARAMETERS :
+ *   @pInputChannel : ptr to input channel whose frames will be post-processed
+ *
+ * RETURN     : Ptr to the newly created channel obj. NULL if failed.
+ *==========================================================================*/
+QCameraReprocessChannel *QCamera2HardwareInterface::addReprocChannel(
+                                                      QCameraChannel *pInputChannel)
+{
+    int32_t rc = NO_ERROR;
+    QCameraReprocessChannel *pChannel = NULL;
+
+    if (pInputChannel == NULL) {
+        ALOGE("%s: input channel obj is NULL", __func__);
+        return NULL;
+    }
+
+    pChannel = new QCameraReprocessChannel(mCameraHandle->camera_handle,
+                                           mCameraHandle->ops);
+    if (NULL == pChannel) {
+        ALOGE("%s: no mem for reprocess channel", __func__);
+        return NULL;
+    }
+
+    // Reprocess channel, init with bundle attr and cb
+    mm_camera_channel_attr_t attr;
+    memset(&attr, 0, sizeof(mm_camera_channel_attr_t));
+    attr.notify_mode = MM_CAMERA_SUPER_BUF_NOTIFY_CONTINUOUS;
+    attr.max_unmatched_frames = mParameters.getMaxUnmatchedFramesInQueue();
+    rc = pChannel->init(&attr,
+                        postproc_channel_cb_routine,
+                        this);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: init reprocess channel failed, ret = %d", __func__, rc);
+        delete pChannel;
+        return NULL;
+    }
+
+    // pp feature config
+    cam_pp_feature_config_t pp_config;
+    memset(&pp_config, 0, sizeof(cam_pp_feature_config_t));
+
+    rc = getPPConfig(pp_config, mParameters.getCurPPCount());
+    if (rc != NO_ERROR){
+        ALOGE("%s: Error while creating PP config",__func__);
+        delete pChannel;
+        return NULL;
+    }
+
+    uint8_t minStreamBufNum = getBufNumRequired(CAM_STREAM_TYPE_OFFLINE_PROC);
+
+    //WNR and HDR happen inline. No extra buffers needed.
+    uint32_t temp_feature_mask = pp_config.feature_mask;
+    temp_feature_mask &= ~CAM_QCOM_FEATURE_HDR;
+    if (temp_feature_mask && mParameters.isHDREnabled()) {
+        minStreamBufNum = (uint8_t)(1 + mParameters.getNumOfExtraHDRInBufsIfNeeded());
+    }
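+    // For example (hypothetical values): with 2 extra HDR input buffers this
+    // yields minStreamBufNum = 1 + 2 = 3 buffers for the offline-proc stream.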
+
+    if (mParameters.isStillMoreEnabled()) {
+        cam_still_more_t stillmore_config = mParameters.getStillMoreSettings();
+        pp_config.burst_cnt = stillmore_config.burst_count;
+        CDBG_HIGH("%s: Stillmore burst %d", __func__, pp_config.burst_cnt);
+
+        // getNumOfExtraBuffersForImageProc returns 1 less buffer assuming
+        // number of capture is already added. In the case of liveshot,
+        // stillmore burst is 1. This is to account for the premature decrement
+        if (mParameters.getNumOfExtraBuffersForImageProc() == 0) {
+            minStreamBufNum += 1;
+        }
+    }
+
+    // Add non-inplace image lib buffers only when pproc is present, because
+    // pproc is non-inplace and input buffers for img lib are output for pproc,
+    // so this number of extra buffers is required. If pproc is not there,
+    // input buffers for imglib come from the snapshot stream.
+    uint8_t imglib_extra_bufs = mParameters.getNumOfExtraBuffersForImageProc();
+    if (temp_feature_mask && imglib_extra_bufs) {
+        // 1 is added because getNumOfExtraBuffersForImageProc returns extra
+        // buffers assuming number of capture is already added
+        minStreamBufNum = (uint8_t)(minStreamBufNum + imglib_extra_bufs + 1);
+    }
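+    // For example (hypothetical values): a base count of 3 with
+    // imglib_extra_bufs = 2 gives minStreamBufNum = 3 + 2 + 1 = 6.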
+
+    // If input channel is Snapshot Channel, then update feature mask
+    if (pInputChannel == m_channels[QCAMERA_CH_TYPE_SNAPSHOT]) {
+        //Mask out features that are already processed in snapshot stream.
+        uint32_t snapshot_feature_mask = 0;
+        mParameters.getStreamPpMask(CAM_STREAM_TYPE_SNAPSHOT, snapshot_feature_mask);
+
+        pp_config.feature_mask &= ~snapshot_feature_mask;
+        ALOGI("%s: Snapshot feature mask: 0x%x, reproc feature mask: 0x%x", __func__,
+                snapshot_feature_mask, pp_config.feature_mask);
+    }
+
+    bool offlineReproc = isRegularCapture();
+    rc = pChannel->addReprocStreamsFromSource(*this,
+                                              pp_config,
+                                              pInputChannel,
+                                              minStreamBufNum,
+                                              mParameters.getNumOfSnapshots(),
+                                              &gCamCaps[mCameraId]->padding_info,
+                                              mParameters,
+                                              mLongshotEnabled,
+                                              offlineReproc);
+    if (rc != NO_ERROR) {
+        delete pChannel;
+        return NULL;
+    }
+
+    return pChannel;
+}
+
+/*===========================================================================
+ * FUNCTION   : addOfflineReprocChannel
+ *
+ * DESCRIPTION: add an offline reprocess channel that contains one reproc stream,
+ *              which will do reprocess on frames coming from external images
+ *
+ * PARAMETERS :
+ *   @img_config  : offline reprocess image info
+ *   @pp_feature  : pp feature config
+ *   @stream_cb   : callback of stream
+ *   @userdata    : user data ptr to callback
+ *
+ * RETURN     : ptr to the newly created channel obj. NULL if failed.
+ *==========================================================================*/
+QCameraReprocessChannel *QCamera2HardwareInterface::addOfflineReprocChannel(
+                                            cam_pp_offline_src_config_t &img_config,
+                                            cam_pp_feature_config_t &pp_feature,
+                                            stream_cb_routine stream_cb,
+                                            void *userdata)
+{
+    int32_t rc = NO_ERROR;
+    QCameraReprocessChannel *pChannel = NULL;
+
+    pChannel = new QCameraReprocessChannel(mCameraHandle->camera_handle,
+                                           mCameraHandle->ops);
+    if (NULL == pChannel) {
+        ALOGE("%s: no mem for reprocess channel", __func__);
+        return NULL;
+    }
+
+    rc = pChannel->init(NULL, NULL, NULL);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: init reprocess channel failed, ret = %d", __func__, rc);
+        delete pChannel;
+        return NULL;
+    }
+
+    QCameraHeapMemory *pStreamInfo = allocateStreamInfoBuf(CAM_STREAM_TYPE_OFFLINE_PROC);
+    if (pStreamInfo == NULL) {
+        ALOGE("%s: no mem for stream info buf", __func__);
+        delete pChannel;
+        return NULL;
+    }
+
+    cam_stream_info_t *streamInfoBuf = (cam_stream_info_t *)pStreamInfo->getPtr(0);
+    memset(streamInfoBuf, 0, sizeof(cam_stream_info_t));
+    streamInfoBuf->stream_type = CAM_STREAM_TYPE_OFFLINE_PROC;
+    streamInfoBuf->fmt = img_config.input_fmt;
+    streamInfoBuf->dim = img_config.input_dim;
+    streamInfoBuf->buf_planes = img_config.input_buf_planes;
+    streamInfoBuf->streaming_mode = CAM_STREAMING_MODE_BURST;
+    streamInfoBuf->num_of_burst = img_config.num_of_bufs;
+
+    streamInfoBuf->reprocess_config.pp_type = CAM_OFFLINE_REPROCESS_TYPE;
+    streamInfoBuf->reprocess_config.offline = img_config;
+    streamInfoBuf->reprocess_config.pp_feature_config = pp_feature;
+
+    rc = pChannel->addStream(*this,
+            pStreamInfo, NULL, img_config.num_of_bufs,
+            &gCamCaps[mCameraId]->padding_info,
+            stream_cb, userdata, false);
+
+    if (rc != NO_ERROR) {
+        ALOGE("%s: add reprocess stream failed, ret = %d", __func__, rc);
+        pStreamInfo->deallocate();
+        delete pStreamInfo;
+        delete pChannel;
+        return NULL;
+    }
+
+    return pChannel;
+}
+
+/*===========================================================================
+ * FUNCTION   : addChannel
+ *
+ * DESCRIPTION: add a channel by its type
+ *
+ * PARAMETERS :
+ *   @ch_type : channel type
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::addChannel(qcamera_ch_type_enum_t ch_type)
+{
+    int32_t rc = UNKNOWN_ERROR;
+    switch (ch_type) {
+    case QCAMERA_CH_TYPE_ZSL:
+        rc = addZSLChannel();
+        break;
+    case QCAMERA_CH_TYPE_CAPTURE:
+        rc = addCaptureChannel();
+        break;
+    case QCAMERA_CH_TYPE_PREVIEW:
+        rc = addPreviewChannel();
+        break;
+    case QCAMERA_CH_TYPE_VIDEO:
+        rc = addVideoChannel();
+        break;
+    case QCAMERA_CH_TYPE_SNAPSHOT:
+        rc = addSnapshotChannel();
+        break;
+    case QCAMERA_CH_TYPE_RAW:
+        rc = addRawChannel();
+        break;
+    case QCAMERA_CH_TYPE_METADATA:
+        rc = addMetaDataChannel();
+        break;
+    case QCAMERA_CH_TYPE_ANALYSIS:
+        rc = addAnalysisChannel();
+        break;
+    default:
+        break;
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : delChannel
+ *
+ * DESCRIPTION: delete a channel by its type
+ *
+ * PARAMETERS :
+ *   @ch_type : channel type
+ *   @destroy : delete context as well
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::delChannel(qcamera_ch_type_enum_t ch_type,
+                                              bool destroy)
+{
+    if (m_channels[ch_type] != NULL) {
+        if (destroy) {
+            delete m_channels[ch_type];
+            m_channels[ch_type] = NULL;
+        } else {
+            m_channels[ch_type]->deleteChannel();
+        }
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : startChannel
+ *
+ * DESCRIPTION: start a channel by its type
+ *
+ * PARAMETERS :
+ *   @ch_type : channel type
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::startChannel(qcamera_ch_type_enum_t ch_type)
+{
+    int32_t rc = UNKNOWN_ERROR;
+    if (m_channels[ch_type] != NULL) {
+        rc = m_channels[ch_type]->config();
+        if (NO_ERROR == rc) {
+            rc = m_channels[ch_type]->start();
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : stopChannel
+ *
+ * DESCRIPTION: stop a channel by its type
+ *
+ * PARAMETERS :
+ *   @ch_type : channel type
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::stopChannel(qcamera_ch_type_enum_t ch_type)
+{
+    int32_t rc = UNKNOWN_ERROR;
+    if (m_channels[ch_type] != NULL) {
+        rc = m_channels[ch_type]->stop();
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : preparePreview
+ *
+ * DESCRIPTION: add channels needed for preview
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::preparePreview()
+{
+    ATRACE_CALL();
+    int32_t rc = NO_ERROR;
+
+    pthread_mutex_lock(&m_parm_lock);
+    rc = mParameters.setStreamConfigure(false, false, false);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: setStreamConfigure failed %d", __func__, rc);
+        pthread_mutex_unlock(&m_parm_lock);
+        return rc;
+    }
+    pthread_mutex_unlock(&m_parm_lock);
+
+    if (mParameters.isZSLMode() && mParameters.getRecordingHintValue() != true) {
+        rc = addChannel(QCAMERA_CH_TYPE_ZSL);
+        if (rc != NO_ERROR) {
+            ALOGE("%s[%d]: failed!! rc = %d", __func__, __LINE__, rc);
+            return rc;
+        }
+    } else {
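+        // Non-ZSL path: with a recording hint set (and not in RDI mode),
+        // snapshot and video channels are added along with the preview channel.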
+        bool recordingHint = mParameters.getRecordingHintValue();
+        if(!isRdiMode() && recordingHint) {
+            rc = addChannel(QCAMERA_CH_TYPE_SNAPSHOT);
+            if (rc != NO_ERROR) {
+               return rc;
+            }
+            rc = addChannel(QCAMERA_CH_TYPE_VIDEO);
+            if (rc != NO_ERROR) {
+                delChannel(QCAMERA_CH_TYPE_SNAPSHOT);
+                ALOGE("%s[%d]:failed!! rc = %d", __func__, __LINE__, rc);
+                return rc;
+            }
+        }
+
+        rc = addChannel(QCAMERA_CH_TYPE_PREVIEW);
+        if (!isRdiMode() && (rc != NO_ERROR)) {
+            if (recordingHint) {
+                delChannel(QCAMERA_CH_TYPE_SNAPSHOT);
+                delChannel(QCAMERA_CH_TYPE_VIDEO);
+            }
+        }
+
+        if (!recordingHint && !mParameters.isSecureMode()) {
+            waitDefferedWork(mMetadataJob);
+            waitDefferedWork(mRawdataJob);
+        }
+
+        if (NO_ERROR != rc) {
+            delChannel(QCAMERA_CH_TYPE_PREVIEW);
+            ALOGE("%s[%d]:failed!! rc = %d", __func__, __LINE__, rc);
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : unpreparePreview
+ *
+ * DESCRIPTION: delete channels for preview
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::unpreparePreview()
+{
+    delChannel(QCAMERA_CH_TYPE_ZSL);
+    delChannel(QCAMERA_CH_TYPE_PREVIEW);
+    delChannel(QCAMERA_CH_TYPE_VIDEO);
+    delChannel(QCAMERA_CH_TYPE_SNAPSHOT);
+}
+
+/*===========================================================================
+ * FUNCTION   : playShutter
+ *
+ * DESCRIPTION: send request to play shutter sound
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::playShutter()
+{
+    if (mNotifyCb == NULL ||
+        msgTypeEnabledWithLock(CAMERA_MSG_SHUTTER) == 0) {
+        CDBG("%s: shutter msg not enabled or NULL cb", __func__);
+        return;
+    }
+
+    qcamera_callback_argm_t cbArg;
+    memset(&cbArg, 0, sizeof(qcamera_callback_argm_t));
+    cbArg.cb_type = QCAMERA_NOTIFY_CALLBACK;
+    cbArg.msg_type = CAMERA_MSG_SHUTTER;
+    cbArg.ext1 = 0;
+    cbArg.ext2 = false;
+    m_cbNotifier.notifyCallback(cbArg);
+}
+
+/*===========================================================================
+ * FUNCTION   : getChannelByHandle
+ *
+ * DESCRIPTION: return a channel by its handle
+ *
+ * PARAMETERS :
+ *   @channelHandle : channel handle
+ *
+ * RETURN     : a channel obj if found, NULL if not found
+ *==========================================================================*/
+QCameraChannel *QCamera2HardwareInterface::getChannelByHandle(uint32_t channelHandle)
+{
+    for(int i = 0; i < QCAMERA_CH_TYPE_MAX; i++) {
+        if (m_channels[i] != NULL &&
+            m_channels[i]->getMyHandle() == channelHandle) {
+            return m_channels[i];
+        }
+    }
+
+    return NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : processFaceDetectionResult
+ *
+ * DESCRIPTION: process face detection result
+ *
+ * PARAMETERS :
+ *   @fd_data : ptr to face detection result struct
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::processFaceDetectionResult(cam_face_detection_data_t *fd_data)
+{
+    if (!mParameters.isFaceDetectionEnabled()) {
+        CDBG_HIGH("%s: FaceDetection not enabled, no ops here", __func__);
+        return NO_ERROR;
+    }
+
+    qcamera_face_detect_type_t fd_type = fd_data->fd_type;
+    if ((NULL == mDataCb) ||
+        (fd_type == QCAMERA_FD_PREVIEW && !msgTypeEnabled(CAMERA_MSG_PREVIEW_METADATA))
+#ifndef VANILLA_HAL
+        || (fd_type == QCAMERA_FD_SNAPSHOT && !msgTypeEnabled(CAMERA_MSG_META_DATA))
+#endif
+        ) {
+        CDBG_HIGH("%s: metadata msgtype not enabled, no ops here", __func__);
+        return NO_ERROR;
+    }
+
+    cam_dimension_t display_dim;
+    mParameters.getStreamDimension(CAM_STREAM_TYPE_PREVIEW, display_dim);
+    if (display_dim.width <= 0 || display_dim.height <= 0) {
+        ALOGE("%s: Invalid preview width or height (%d x %d)",
+              __func__, display_dim.width, display_dim.height);
+        return UNKNOWN_ERROR;
+    }
+
+    // process face detection result
+    // preview and snapshot face detection results need separate handling
+    size_t faceResultSize = 0;
+    size_t data_len = 0;
+    if(fd_type == QCAMERA_FD_PREVIEW){
+        //fd for preview frames
+        faceResultSize = sizeof(camera_frame_metadata_t);
+        faceResultSize += sizeof(camera_face_t) * MAX_ROI;
+    }else if(fd_type == QCAMERA_FD_SNAPSHOT){
+#ifndef VANILLA_HAL
+        // fd for snapshot frames
+        //check if face is detected in this frame
+        if(fd_data->num_faces_detected > 0){
+            data_len = sizeof(camera_frame_metadata_t) +
+                         sizeof(camera_face_t) * fd_data->num_faces_detected;
+        }else{
+            //no face
+            data_len = 0;
+        }
+#endif
+        faceResultSize = 1 *sizeof(int)    //meta data type
+                       + 1 *sizeof(int)    // meta data len
+                       + data_len;         //data
+    }
+
+    camera_memory_t *faceResultBuffer = mGetMemory(-1,
+                                                   faceResultSize,
+                                                   1,
+                                                   mCallbackCookie);
+    if ( NULL == faceResultBuffer ) {
+        ALOGE("%s: Not enough memory for face result data",
+              __func__);
+        return NO_MEMORY;
+    }
+
+    unsigned char *pFaceResult = ( unsigned char * ) faceResultBuffer->data;
+    memset(pFaceResult, 0, faceResultSize);
+    unsigned char *faceData = NULL;
+    if(fd_type == QCAMERA_FD_PREVIEW){
+        faceData = pFaceResult;
+    }else if(fd_type == QCAMERA_FD_SNAPSHOT){
+#ifndef VANILLA_HAL
+        //need to fill meta type and meta data len first
+        int *data_header = (int* )pFaceResult;
+        data_header[0] = CAMERA_META_DATA_FD;
+        data_header[1] = (int)data_len;
+
+        if(data_len <= 0){
+            //no valid face in this frame; send the header-only callback and return
+            qcamera_callback_argm_t cbArg;
+            memset(&cbArg, 0, sizeof(qcamera_callback_argm_t));
+            cbArg.cb_type = QCAMERA_DATA_CALLBACK;
+            cbArg.msg_type = CAMERA_MSG_META_DATA;
+            cbArg.data = faceResultBuffer;
+            cbArg.user_data = faceResultBuffer;
+            cbArg.cookie = this;
+            cbArg.release_cb = releaseCameraMemory;
+            int32_t rc = m_cbNotifier.notifyCallback(cbArg);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: fail sending notification", __func__);
+                faceResultBuffer->release(faceResultBuffer);
+            }
+            return rc;
+        }
+#endif
+        faceData = pFaceResult + 2 *sizeof(int); //skip the two int header fields
+    }
+
+    camera_frame_metadata_t *roiData = (camera_frame_metadata_t * ) faceData;
+    camera_face_t *faces = (camera_face_t *) ( faceData + sizeof(camera_frame_metadata_t) );
+
+    roiData->number_of_faces = fd_data->num_faces_detected;
+    roiData->faces = faces;
+    if (roiData->number_of_faces > 0) {
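+        // MAP_TO_DRIVER_COORDINATE scales the driver-reported face coordinates,
+        // which are relative to the preview dimensions, into the [-1000, 1000]
+        // range used by the Android face metadata API.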
+        for (int i = 0; i < roiData->number_of_faces; i++) {
+            faces[i].id = fd_data->faces[i].face_id;
+            faces[i].score = fd_data->faces[i].score;
+
+            // left
+            faces[i].rect[0] =
+                MAP_TO_DRIVER_COORDINATE(fd_data->faces[i].face_boundary.left, display_dim.width, 2000, -1000);
+
+            // top
+            faces[i].rect[1] =
+                MAP_TO_DRIVER_COORDINATE(fd_data->faces[i].face_boundary.top, display_dim.height, 2000, -1000);
+
+            // right
+            faces[i].rect[2] = faces[i].rect[0] +
+                MAP_TO_DRIVER_COORDINATE(fd_data->faces[i].face_boundary.width, display_dim.width, 2000, 0);
+
+             // bottom
+            faces[i].rect[3] = faces[i].rect[1] +
+                MAP_TO_DRIVER_COORDINATE(fd_data->faces[i].face_boundary.height, display_dim.height, 2000, 0);
+
+            // Center of left eye
+            faces[i].left_eye[0] =
+                MAP_TO_DRIVER_COORDINATE(fd_data->faces[i].left_eye_center.x, display_dim.width, 2000, -1000);
+
+            faces[i].left_eye[1] =
+                MAP_TO_DRIVER_COORDINATE(fd_data->faces[i].left_eye_center.y, display_dim.height, 2000, -1000);
+
+            // Center of right eye
+            faces[i].right_eye[0] =
+                MAP_TO_DRIVER_COORDINATE(fd_data->faces[i].right_eye_center.x, display_dim.width, 2000, -1000);
+
+            faces[i].right_eye[1] =
+                MAP_TO_DRIVER_COORDINATE(fd_data->faces[i].right_eye_center.y, display_dim.height, 2000, -1000);
+
+            // Center of mouth
+            faces[i].mouth[0] =
+                MAP_TO_DRIVER_COORDINATE(fd_data->faces[i].mouth_center.x, display_dim.width, 2000, -1000);
+
+            faces[i].mouth[1] =
+                MAP_TO_DRIVER_COORDINATE(fd_data->faces[i].mouth_center.y, display_dim.height, 2000, -1000);
+
+#ifndef VANILLA_HAL
+            faces[i].smile_degree = fd_data->faces[i].smile_degree;
+            faces[i].smile_score = fd_data->faces[i].smile_confidence;
+            faces[i].blink_detected = fd_data->faces[i].blink_detected;
+            faces[i].face_recognised = fd_data->faces[i].face_recognised;
+            faces[i].gaze_angle = fd_data->faces[i].gaze_angle;
+
+            // upscale by 2 to recover from daemon downscaling
+            faces[i].updown_dir = fd_data->faces[i].updown_dir * 2;
+            faces[i].leftright_dir = fd_data->faces[i].leftright_dir * 2;
+            faces[i].roll_dir = fd_data->faces[i].roll_dir * 2;
+
+            faces[i].leye_blink = fd_data->faces[i].left_blink;
+            faces[i].reye_blink = fd_data->faces[i].right_blink;
+            faces[i].left_right_gaze = fd_data->faces[i].left_right_gaze;
+            faces[i].top_bottom_gaze = fd_data->faces[i].top_bottom_gaze;
+#endif
+
+        }
+    }
+
+    qcamera_callback_argm_t cbArg;
+    memset(&cbArg, 0, sizeof(qcamera_callback_argm_t));
+    cbArg.cb_type = QCAMERA_DATA_CALLBACK;
+    if(fd_type == QCAMERA_FD_PREVIEW){
+        cbArg.msg_type = CAMERA_MSG_PREVIEW_METADATA;
+    }
+#ifndef VANILLA_HAL
+    else if(fd_type == QCAMERA_FD_SNAPSHOT){
+        cbArg.msg_type = CAMERA_MSG_META_DATA;
+    }
+#endif
+    cbArg.data = faceResultBuffer;
+    cbArg.metadata = roiData;
+    cbArg.user_data = faceResultBuffer;
+    cbArg.cookie = this;
+    cbArg.release_cb = releaseCameraMemory;
+    int32_t rc = m_cbNotifier.notifyCallback(cbArg);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: fail sending notification", __func__);
+        faceResultBuffer->release(faceResultBuffer);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseCameraMemory
+ *
+ * DESCRIPTION: releases camera memory objects
+ *
+ * PARAMETERS :
+ *   @data    : buffer to be released
+ *   @cookie  : context data
+ *   @cbStatus: callback status
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCamera2HardwareInterface::releaseCameraMemory(void *data,
+                                                    void */*cookie*/,
+                                                    int32_t /*cbStatus*/)
+{
+    camera_memory_t *mem = ( camera_memory_t * ) data;
+    if ( NULL != mem ) {
+        mem->release(mem);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : returnStreamBuffer
+ *
+ * DESCRIPTION: returns back a stream buffer
+ *
+ * PARAMETERS :
+ *   @data    : buffer to be released
+ *   @cookie  : context data
+ *   @cbStatus: callback status
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCamera2HardwareInterface::returnStreamBuffer(void *data,
+                                                   void *cookie,
+                                                   int32_t /*cbStatus*/)
+{
+    QCameraStream *stream = ( QCameraStream * ) cookie;
+    int idx = *((int *)data);
+    if ((NULL != stream) && (0 <= idx)) {
+        stream->bufDone((uint32_t)idx);
+    } else {
+        ALOGE("%s: Cannot return buffer %d %p", __func__, idx, cookie);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : processHistogramStats
+ *
+ * DESCRIPTION: process histogram stats
+ *
+ * PARAMETERS :
+ *   @hist_data : ptr to histogram stats struct
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::processHistogramStats(cam_hist_stats_t &stats_data)
+{
+#ifndef VANILLA_HAL
+    if (!mParameters.isHistogramEnabled()) {
+        CDBG_HIGH("%s: Histogram not enabled, no ops here", __func__);
+        return NO_ERROR;
+    }
+
+    camera_memory_t *histBuffer = mGetMemory(-1,
+                                             sizeof(cam_histogram_data_t),
+                                             1,
+                                             mCallbackCookie);
+    if ( NULL == histBuffer ) {
+        ALOGE("%s: Not enough memory for histogram data",
+              __func__);
+        return NO_MEMORY;
+    }
+
+    cam_histogram_data_t *pHistData = (cam_histogram_data_t *)histBuffer->data;
+    if (pHistData == NULL) {
+        ALOGE("%s: memory data ptr is NULL", __func__);
+        histBuffer->release(histBuffer);
+        return UNKNOWN_ERROR;
+    }
+
+    switch (stats_data.type) {
+    case CAM_HISTOGRAM_TYPE_BAYER:
+        *pHistData = stats_data.bayer_stats.gb_stats;
+        break;
+    case CAM_HISTOGRAM_TYPE_YUV:
+        *pHistData = stats_data.yuv_stats;
+        break;
+    }
+
+    qcamera_callback_argm_t cbArg;
+    memset(&cbArg, 0, sizeof(qcamera_callback_argm_t));
+    cbArg.cb_type = QCAMERA_DATA_CALLBACK;
+    cbArg.msg_type = CAMERA_MSG_STATS_DATA;
+    cbArg.data = histBuffer;
+    cbArg.user_data = histBuffer;
+    cbArg.cookie = this;
+    cbArg.release_cb = releaseCameraMemory;
+    int32_t rc = m_cbNotifier.notifyCallback(cbArg);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: fail sending notification", __func__);
+        histBuffer->release(histBuffer);
+    }
+#endif
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : calcThermalLevel
+ *
+ * DESCRIPTION: Calculates the target fps range depending on
+ *              the thermal level.
+ *
+ * PARAMETERS :
+ *   @level    : received thermal level
+ *   @minFPS   : minimum configured fps range
+ *   @maxFPS   : maximum configured fps range
+ *   @adjustedRange : target fps range
+ *   @skipPattern : target skip pattern
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::calcThermalLevel(
+            qcamera_thermal_level_enum_t level,
+            const int minFPSi,
+            const int maxFPSi,
+            cam_fps_range_t &adjustedRange,
+            enum msm_vfe_frame_skip_pattern &skipPattern)
+{
+    const float minFPS = (float)minFPSi;
+    const float maxFPS = (float)maxFPSi;
+
+    // Initialize video fps to preview fps
+    float minVideoFps = minFPS, maxVideoFps = maxFPS;
+    cam_fps_range_t videoFps;
+    // If HFR mode, update video fps accordingly
+    if(isHFRMode()) {
+        mParameters.getHfrFps(videoFps);
+        minVideoFps = videoFps.video_min_fps;
+        maxVideoFps = videoFps.video_max_fps;
+    }
+
+    CDBG_HIGH("%s: level: %d, preview minfps %f, preview maxfpS %f, "
+              "video minfps %f, video maxfpS %f",
+            __func__, level, minFPS, maxFPS, minVideoFps, maxVideoFps);
+
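+    // Incoming fps values follow the fps * 1000 fixed-point convention;
+    // divide by 1000 to get the floating point ranges for the adjusted range.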
+    switch(level) {
+    case QCAMERA_THERMAL_NO_ADJUSTMENT:
+        {
+            adjustedRange.min_fps = minFPS / 1000.0f;
+            adjustedRange.max_fps = maxFPS / 1000.0f;
+            adjustedRange.video_min_fps = minVideoFps / 1000.0f;
+            adjustedRange.video_max_fps = maxVideoFps / 1000.0f;
+            skipPattern = NO_SKIP;
+        }
+        break;
+    case QCAMERA_THERMAL_SLIGHT_ADJUSTMENT:
+        {
+            adjustedRange.min_fps = minFPS / 1000.0f;
+            adjustedRange.max_fps = maxFPS / 1000.0f;
+            adjustedRange.min_fps -= 0.1f * adjustedRange.min_fps;
+            adjustedRange.max_fps -= 0.1f * adjustedRange.max_fps;
+            adjustedRange.video_min_fps = minVideoFps / 1000.0f;
+            adjustedRange.video_max_fps = maxVideoFps / 1000.0f;
+            adjustedRange.video_min_fps -= 0.1f * adjustedRange.video_min_fps;
+            adjustedRange.video_max_fps -= 0.1f * adjustedRange.video_max_fps;
+            if ( adjustedRange.min_fps < 1 ) {
+                adjustedRange.min_fps = 1;
+            }
+            if ( adjustedRange.max_fps < 1 ) {
+                adjustedRange.max_fps = 1;
+            }
+            if ( adjustedRange.video_min_fps < 1 ) {
+                adjustedRange.video_min_fps = 1;
+            }
+            if ( adjustedRange.video_max_fps < 1 ) {
+                adjustedRange.video_max_fps = 1;
+            }
+            skipPattern = EVERY_2FRAME;
+        }
+        break;
+    case QCAMERA_THERMAL_BIG_ADJUSTMENT:
+        {
+            adjustedRange.min_fps = minFPS / 1000.0f;
+            adjustedRange.max_fps = maxFPS / 1000.0f;
+            adjustedRange.min_fps -= 0.2f * adjustedRange.min_fps;
+            adjustedRange.max_fps -= 0.2f * adjustedRange.max_fps;
+            adjustedRange.video_min_fps = minVideoFps / 1000.0f;
+            adjustedRange.video_max_fps = maxVideoFps / 1000.0f;
+            adjustedRange.video_min_fps -= 0.2f * adjustedRange.video_min_fps;
+            adjustedRange.video_max_fps -= 0.2f * adjustedRange.video_max_fps;
+            if ( adjustedRange.min_fps < 1 ) {
+                adjustedRange.min_fps = 1;
+            }
+            if ( adjustedRange.max_fps < 1 ) {
+                adjustedRange.max_fps = 1;
+            }
+            if ( adjustedRange.video_min_fps < 1 ) {
+                adjustedRange.video_min_fps = 1;
+            }
+            if ( adjustedRange.video_max_fps < 1 ) {
+                adjustedRange.video_max_fps = 1;
+            }
+            skipPattern = EVERY_4FRAME;
+        }
+        break;
+    case QCAMERA_THERMAL_SHUTDOWN:
+        {
+            // Stop Preview?
+            // Set lowest min FPS for now
+            adjustedRange.min_fps = minFPS/1000.0f;
+            adjustedRange.max_fps = minFPS/1000.0f;
+            for (size_t i = 0; i < gCamCaps[mCameraId]->fps_ranges_tbl_cnt; i++) {
+                if (gCamCaps[mCameraId]->fps_ranges_tbl[i].min_fps < adjustedRange.min_fps) {
+                    adjustedRange.min_fps = gCamCaps[mCameraId]->fps_ranges_tbl[i].min_fps;
+                    adjustedRange.max_fps = adjustedRange.min_fps;
+                }
+            }
+            skipPattern = MAX_SKIP;
+            adjustedRange.video_min_fps = adjustedRange.min_fps;
+            adjustedRange.video_max_fps = adjustedRange.max_fps;
+        }
+        break;
+    default:
+        {
+            ALOGE("%s: Invalid thermal level %d", __func__, level);
+            return BAD_VALUE;
+        }
+        break;
+    }
+    CDBG_HIGH("%s: Thermal level %d, FPS [%3.2f,%3.2f, %3.2f,%3.2f], frameskip %d",
+          __func__, level, adjustedRange.min_fps, adjustedRange.max_fps,
+          adjustedRange.video_min_fps, adjustedRange.video_max_fps, skipPattern);
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : recalcFPSRange
+ *
+ * DESCRIPTION: adjust the configured fps range regarding
+ *              the last thermal level.
+ *
+ * PARAMETERS :
+ *   @minFPS   : minimum configured fps range
+ *   @maxFPS   : maximum configured fps range
+ *   @adjustedRange : target fps range
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::recalcFPSRange(int &minFPS, int &maxFPS,
+        cam_fps_range_t &adjustedRange)
+{
+    enum msm_vfe_frame_skip_pattern skipPattern;
+    calcThermalLevel(mThermalLevel,
+                     minFPS,
+                     maxFPS,
+                     adjustedRange,
+                     skipPattern);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : updateThermalLevel
+ *
+ * DESCRIPTION: update thermal level depending on thermal events
+ *
+ * PARAMETERS :
+ *   @level   : thermal level
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::updateThermalLevel(void *thermal_level)
+{
+    int ret = NO_ERROR;
+    cam_fps_range_t adjustedRange;
+    int minFPS, maxFPS;
+    enum msm_vfe_frame_skip_pattern skipPattern;
+    qcamera_thermal_level_enum_t level = *(qcamera_thermal_level_enum_t *)thermal_level;
+
+    pthread_mutex_lock(&m_parm_lock);
+
+    if (!mCameraOpened) {
+        CDBG_HIGH("%s: Camera is not opened, no need to update camera parameters", __func__);
+        pthread_mutex_unlock(&m_parm_lock);
+        return NO_ERROR;
+    }
+
+    mParameters.getPreviewFpsRange(&minFPS, &maxFPS);
+    qcamera_thermal_mode thermalMode = mParameters.getThermalMode();
+    calcThermalLevel(level, minFPS, maxFPS, adjustedRange, skipPattern);
+    mThermalLevel = level;
+
+    if (thermalMode == QCAMERA_THERMAL_ADJUST_FPS)
+        ret = mParameters.adjustPreviewFpsRange(&adjustedRange);
+    else if (thermalMode == QCAMERA_THERMAL_ADJUST_FRAMESKIP)
+        ret = mParameters.setFrameSkip(skipPattern);
+    else
+        ALOGE("%s: Incorrect thermal mode %d", __func__, thermalMode);
+
+    pthread_mutex_unlock(&m_parm_lock);
+
+    return ret;
+
+}
+
+/*===========================================================================
+ * FUNCTION   : updateParameters
+ *
+ * DESCRIPTION: update parameters
+ *
+ * PARAMETERS :
+ *   @parms       : input parameters string
+ *   @needRestart : output, flag to indicate if preview restart is needed
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2HardwareInterface::updateParameters(const char *parms, bool &needRestart)
+{
+    int rc = NO_ERROR;
+    pthread_mutex_lock(&m_parm_lock);
+    String8 str = String8(parms);
+    QCameraParameters param(str);
+    rc =  mParameters.updateParameters(param, needRestart);
+
+    // update stream based parameter settings
+    for (int i = 0; i < QCAMERA_CH_TYPE_MAX; i++) {
+        if (m_channels[i] != NULL) {
+            m_channels[i]->UpdateStreamBasedParameters(mParameters);
+        }
+    }
+    pthread_mutex_unlock(&m_parm_lock);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : commitParameterChanges
+ *
+ * DESCRIPTION: commit parameter changes to the backend to take effect
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ * NOTE       : This function must be called after updateParameters.
+ *              Otherwise, no change will be passed to backend to take effect.
+ *==========================================================================*/
+int QCamera2HardwareInterface::commitParameterChanges()
+{
+    int rc = NO_ERROR;
+    pthread_mutex_lock(&m_parm_lock);
+    rc = mParameters.commitParameters();
+    if (rc == NO_ERROR) {
+        // update number of snapshot based on committed parameters setting
+        rc = mParameters.setNumOfSnapshot();
+    }
+    pthread_mutex_unlock(&m_parm_lock);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : needDebugFps
+ *
+ * DESCRIPTION: check whether fps log info needs to be printed out
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : true: need print out fps log
+ *              false: no need to print out fps log
+ *==========================================================================*/
+bool QCamera2HardwareInterface::needDebugFps()
+{
+    bool needFps = false;
+    pthread_mutex_lock(&m_parm_lock);
+    needFps = mParameters.isFpsDebugEnabled();
+    pthread_mutex_unlock(&m_parm_lock);
+    return needFps;
+}
+
+/*===========================================================================
+ * FUNCTION   : isCACEnabled
+ *
+ * DESCRIPTION: if CAC is enabled
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : true: CAC is enabled
+ *              false: CAC is disabled
+ *==========================================================================*/
+bool QCamera2HardwareInterface::isCACEnabled()
+{
+    char prop[PROPERTY_VALUE_MAX];
+    memset(prop, 0, sizeof(prop));
+    property_get("persist.camera.feature.cac", prop, "0");
+    int enableCAC = atoi(prop);
+    return enableCAC == 1;
+}
+
+/*===========================================================================
+ * FUNCTION   : is4k2kResolution
+ *
+ * DESCRIPTION: check whether the resolution is 4K (3840x2160 UHD or 4096x2160 DCI)
+ *
+ * PARAMETERS :
+ *   @resolution : dimension to check
+ *
+ * RETURN     : true: resolution is 4K
+ *              false: otherwise
+ *==========================================================================*/
+bool QCamera2HardwareInterface::is4k2kResolution(cam_dimension_t* resolution)
+{
+   bool enabled = false;
+   if ((resolution->width == 4096 && resolution->height == 2160) ||
+       (resolution->width == 3840 && resolution->height == 2160) ) {
+      enabled = true;
+   }
+   return enabled;
+}
+
+
+/*===========================================================================
+ * FUNCTION   : isAFRunning
+ *
+ * DESCRIPTION: if AF is in progress while in Auto/Macro focus modes
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : true: AF in progress
+ *              false: AF not in progress
+ *==========================================================================*/
+bool QCamera2HardwareInterface::isAFRunning()
+{
+    bool isAFInProgress = (m_currentFocusState == CAM_AF_SCANNING &&
+            (mParameters.getFocusMode() == CAM_FOCUS_MODE_AUTO ||
+            mParameters.getFocusMode() == CAM_FOCUS_MODE_MACRO));
+
+    return isAFInProgress;
+}
+
+/*===========================================================================
+ * FUNCTION   : isPreviewRestartEnabled
+ *
+ * DESCRIPTION: Check whether preview should be restarted automatically
+ *              during image capture.
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : true: preview restart during capture is enabled
+ *              false: preview restart is disabled
+ *==========================================================================*/
+bool QCamera2HardwareInterface::isPreviewRestartEnabled()
+{
+    char prop[PROPERTY_VALUE_MAX];
+    memset(prop, 0, sizeof(prop));
+    property_get("persist.camera.feature.restart", prop, "0");
+    int earlyRestart = atoi(prop);
+    return earlyRestart == 1;
+}
+
+/*===========================================================================
+ * FUNCTION   : needReprocess
+ *
+ * DESCRIPTION: if reprocess is needed
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : true: needed
+ *              false: no need
+ *==========================================================================*/
+bool QCamera2HardwareInterface::needReprocess()
+{
+    pthread_mutex_lock(&m_parm_lock);
+
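+    // Offline RAW capture always goes through the reprocess path.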
+    if (mParameters.getofflineRAW()) {
+        pthread_mutex_unlock(&m_parm_lock);
+        return true;
+    }
+    if (!mParameters.isJpegPictureFormat() &&
+        !mParameters.isNV21PictureFormat()) {
+        // RAW image, no need to reprocess
+        pthread_mutex_unlock(&m_parm_lock);
+        return false;
+    }
+
+    if (mParameters.isHDREnabled()) {
+        CDBG_HIGH("%s: need do reprocess for HDR", __func__);
+        pthread_mutex_unlock(&m_parm_lock);
+        return true;
+    }
+    //Disable reprocess for 4K liveshot case
+    if (mParameters.is4k2kVideoResolution() && mParameters.getRecordingHintValue()) {
+        pthread_mutex_unlock(&m_parm_lock);
+        return false;
+    }
+    if ((gCamCaps[mCameraId]->qcom_supported_feature_mask & CAM_QCOM_FEATURE_ROTATION) > 0 &&
+            (mParameters.getJpegRotation() > 0)) {
+            // current rotation is not zero, and pp has the capability to process rotation
+            CDBG_HIGH("%s: need to do reprocess for rotation=%d",
+                    __func__, mParameters.getJpegRotation());
+            pthread_mutex_unlock(&m_parm_lock);
+            return true;
+    }
+
+    if (isZSLMode()) {
+        if (((gCamCaps[mCameraId]->min_required_pp_mask > 0) ||
+             mParameters.isWNREnabled() || isCACEnabled())) {
+            // TODO: add for ZSL HDR later
+            CDBG_HIGH("%s: need do reprocess for ZSL WNR or min PP reprocess", __func__);
+            pthread_mutex_unlock(&m_parm_lock);
+            return true;
+        }
+
+        int snapshot_flipMode =
+            mParameters.getFlipMode(CAM_STREAM_TYPE_SNAPSHOT);
+        if (snapshot_flipMode > 0) {
+            CDBG_HIGH("%s: Need do flip for snapshot in ZSL mode", __func__);
+            pthread_mutex_unlock(&m_parm_lock);
+            return true;
+        }
+    } else {
+        if (gCamCaps[mCameraId]->min_required_pp_mask & CAM_QCOM_FEATURE_PP_SUPERSET) {
+            CDBG_HIGH("%s: Need CPP in non-ZSL mode", __func__);
+            pthread_mutex_unlock(&m_parm_lock);
+            return true;
+        }
+    }
+
+    if ((gCamCaps[mCameraId]->qcom_supported_feature_mask & CAM_QCOM_FEATURE_SCALE) > 0 &&
+        mParameters.m_reprocScaleParam.isScaleEnabled() &&
+        mParameters.m_reprocScaleParam.isUnderScaling()) {
+        // Reprocess scaling is enabled and the current snapshot needs scaling
+        CDBG_HIGH("%s: need do reprocess for scale", __func__);
+        pthread_mutex_unlock(&m_parm_lock);
+        return true;
+    }
+
+    if (mParameters.isUbiFocusEnabled() ||
+            mParameters.isUbiRefocus() ||
+            mParameters.isChromaFlashEnabled() ||
+            mParameters.isHDREnabled() ||
+            mParameters.isOptiZoomEnabled() ||
+            mParameters.isStillMoreEnabled()) {
+        CDBG_HIGH("%s: need reprocess for |UbiFocus=%d|ChromaFlash=%d|OptiZoom=%d|StillMore=%d|",
+                 __func__,
+                mParameters.isUbiFocusEnabled(),
+                mParameters.isChromaFlashEnabled(),
+                mParameters.isOptiZoomEnabled(),
+                mParameters.isStillMoreEnabled());
+        pthread_mutex_unlock(&m_parm_lock);
+        return true;
+    }
+
+    pthread_mutex_unlock(&m_parm_lock);
+    return false;
+}
+
+/*===========================================================================
+ * FUNCTION   : needRotationReprocess
+ *
+ * DESCRIPTION: if rotation needs to be done by reprocess in pp
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : true: needed
+ *              false: no need
+ *==========================================================================*/
+bool QCamera2HardwareInterface::needRotationReprocess()
+{
+    pthread_mutex_lock(&m_parm_lock);
+    if (!mParameters.isJpegPictureFormat() &&
+        !mParameters.isNV21PictureFormat()) {
+        // RAW image, no need to reprocess
+        pthread_mutex_unlock(&m_parm_lock);
+        return false;
+    }
+
+    if (mParameters.is4k2kVideoResolution() && mParameters.getRecordingHintValue()) {
+        //Disable reprocess for 4K liveshot case
+        pthread_mutex_unlock(&m_parm_lock);
+        return false;
+    }
+
+    if ((gCamCaps[mCameraId]->qcom_supported_feature_mask & CAM_QCOM_FEATURE_ROTATION) > 0 &&
+            (mParameters.getJpegRotation() > 0)) {
+        // current rotation is not zero, and pp has the capability to process rotation
+        CDBG_HIGH("%s: need to do reprocess for rotation=%d",
+                __func__, mParameters.getJpegRotation());
+        pthread_mutex_unlock(&m_parm_lock);
+        return true;
+    }
+
+    pthread_mutex_unlock(&m_parm_lock);
+    return false;
+}
+
+/*===========================================================================
+ * FUNCTION   : needScaleReprocess
+ *
+ * DESCRIPTION: if scale needs to be done by reprocess in pp
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : true: needed
+ *              false: no need
+ *==========================================================================*/
+bool QCamera2HardwareInterface::needScaleReprocess()
+{
+    pthread_mutex_lock(&m_parm_lock);
+    if (!mParameters.isJpegPictureFormat() &&
+        !mParameters.isNV21PictureFormat()) {
+        // RAW image, no need to reprocess
+        pthread_mutex_unlock(&m_parm_lock);
+        return false;
+    }
+
+    if ((gCamCaps[mCameraId]->qcom_supported_feature_mask & CAM_QCOM_FEATURE_SCALE) > 0 &&
+        mParameters.m_reprocScaleParam.isScaleEnabled() &&
+        mParameters.m_reprocScaleParam.isUnderScaling()) {
+        // Reprocess scaling is enabled and the current snapshot needs scaling
+        CDBG_HIGH("%s: need do reprocess for scale", __func__);
+        pthread_mutex_unlock(&m_parm_lock);
+        return true;
+    }
+
+    pthread_mutex_unlock(&m_parm_lock);
+    return false;
+}
+
+/*===========================================================================
+ * FUNCTION   : getThumbnailSize
+ *
+ * DESCRIPTION: get user set thumbnail size
+ *
+ * PARAMETERS :
+ *   @dim     : output of thumbnail dimension
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera2HardwareInterface::getThumbnailSize(cam_dimension_t &dim)
+{
+    pthread_mutex_lock(&m_parm_lock);
+    mParameters.getThumbnailSize(&dim.width, &dim.height);
+    pthread_mutex_unlock(&m_parm_lock);
+}
+
+/*===========================================================================
+ * FUNCTION   : getJpegQuality
+ *
+ * DESCRIPTION: get user set jpeg quality
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : jpeg quality setting
+ *==========================================================================*/
+uint32_t QCamera2HardwareInterface::getJpegQuality()
+{
+    uint32_t quality = 0;
+    pthread_mutex_lock(&m_parm_lock);
+    quality =  mParameters.getJpegQuality();
+    pthread_mutex_unlock(&m_parm_lock);
+    return quality;
+}
+
+/*===========================================================================
+ * FUNCTION   : getExifData
+ *
+ * DESCRIPTION: get exif data to be passed into jpeg encoding
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : exif data from user setting and GPS
+ *==========================================================================*/
+QCameraExif *QCamera2HardwareInterface::getExifData()
+{
+    QCameraExif *exif = new QCameraExif();
+    if (exif == NULL) {
+        ALOGE("%s: No memory for QCameraExif", __func__);
+        return NULL;
+    }
+
+    int32_t rc = NO_ERROR;
+
+    pthread_mutex_lock(&m_parm_lock);
+
+    // add exif entries
+    String8 dateTime, subSecTime;
+    rc = mParameters.getExifDateTime(dateTime, subSecTime);
+    if(rc == NO_ERROR) {
+        exif->addEntry(EXIFTAGID_DATE_TIME, EXIF_ASCII,
+                (uint32_t)(dateTime.length() + 1), (void *)dateTime.string());
+        exif->addEntry(EXIFTAGID_EXIF_DATE_TIME_ORIGINAL, EXIF_ASCII,
+                (uint32_t)(dateTime.length() + 1), (void *)dateTime.string());
+        exif->addEntry(EXIFTAGID_EXIF_DATE_TIME_DIGITIZED, EXIF_ASCII,
+                (uint32_t)(dateTime.length() + 1), (void *)dateTime.string());
+        exif->addEntry(EXIFTAGID_SUBSEC_TIME, EXIF_ASCII,
+                (uint32_t)(subSecTime.length() + 1), (void *)subSecTime.string());
+        exif->addEntry(EXIFTAGID_SUBSEC_TIME_ORIGINAL, EXIF_ASCII,
+                (uint32_t)(subSecTime.length() + 1), (void *)subSecTime.string());
+        exif->addEntry(EXIFTAGID_SUBSEC_TIME_DIGITIZED, EXIF_ASCII,
+                (uint32_t)(subSecTime.length() + 1), (void *)subSecTime.string());
+    } else {
+        ALOGE("%s: getExifDateTime failed", __func__);
+    }
+
+    rat_t focalLength;
+    rc = mParameters.getExifFocalLength(&focalLength);
+    if (rc == NO_ERROR) {
+        exif->addEntry(EXIFTAGID_FOCAL_LENGTH,
+                       EXIF_RATIONAL,
+                       1,
+                       (void *)&(focalLength));
+    } else {
+        ALOGE("%s: getExifFocalLength failed", __func__);
+    }
+
+    uint16_t isoSpeed = mParameters.getExifIsoSpeed();
+    if (getSensorType() != CAM_SENSOR_YUV) {
+        exif->addEntry(EXIFTAGID_ISO_SPEED_RATING,
+                       EXIF_SHORT,
+                       1,
+                       (void *)&(isoSpeed));
+    }
+
+    char gpsProcessingMethod[EXIF_ASCII_PREFIX_SIZE + GPS_PROCESSING_METHOD_SIZE];
+    uint32_t count = 0;
+    rc = mParameters.getExifGpsProcessingMethod(gpsProcessingMethod, count);
+    if(rc == NO_ERROR) {
+        exif->addEntry(EXIFTAGID_GPS_PROCESSINGMETHOD,
+                       EXIF_ASCII,
+                       count,
+                       (void *)gpsProcessingMethod);
+    } else {
+        ALOGE("%s: getExifGpsProcessingMethod failed", __func__);
+    }
+
+    rat_t latitude[3];
+    char latRef[2];
+    rc = mParameters.getExifLatitude(latitude, latRef);
+    if(rc == NO_ERROR) {
+        exif->addEntry(EXIFTAGID_GPS_LATITUDE,
+                       EXIF_RATIONAL,
+                       3,
+                       (void *)latitude);
+        exif->addEntry(EXIFTAGID_GPS_LATITUDE_REF,
+                       EXIF_ASCII,
+                       2,
+                       (void *)latRef);
+    } else {
+        ALOGE("%s: getExifLatitude failed", __func__);
+    }
+
+    rat_t longitude[3];
+    char lonRef[2];
+    rc = mParameters.getExifLongitude(longitude, lonRef);
+    if(rc == NO_ERROR) {
+        exif->addEntry(EXIFTAGID_GPS_LONGITUDE,
+                       EXIF_RATIONAL,
+                       3,
+                       (void *)longitude);
+
+        exif->addEntry(EXIFTAGID_GPS_LONGITUDE_REF,
+                       EXIF_ASCII,
+                       2,
+                       (void *)lonRef);
+    } else {
+        ALOGE("%s: getExifLongitude failed", __func__);
+    }
+
+    rat_t altitude;
+    char altRef;
+    rc = mParameters.getExifAltitude(&altitude, &altRef);
+    if(rc == NO_ERROR) {
+        exif->addEntry(EXIFTAGID_GPS_ALTITUDE,
+                       EXIF_RATIONAL,
+                       1,
+                       (void *)&(altitude));
+
+        exif->addEntry(EXIFTAGID_GPS_ALTITUDE_REF,
+                       EXIF_BYTE,
+                       1,
+                       (void *)&altRef);
+    } else {
+        ALOGE("%s: getExifAltitude failed", __func__);
+    }
+
+    char gpsDateStamp[20];
+    rat_t gpsTimeStamp[3];
+    rc = mParameters.getExifGpsDateTimeStamp(gpsDateStamp, 20, gpsTimeStamp);
+    if(rc == NO_ERROR) {
+        exif->addEntry(EXIFTAGID_GPS_DATESTAMP,
+                       EXIF_ASCII,
+                       (uint32_t)(strlen(gpsDateStamp) + 1),
+                       (void *)gpsDateStamp);
+
+        exif->addEntry(EXIFTAGID_GPS_TIMESTAMP,
+                       EXIF_RATIONAL,
+                       3,
+                       (void *)gpsTimeStamp);
+    } else {
+        ALOGE("%s: getExifGpsDataTimeStamp failed", __func__);
+    }
+
+#ifdef ENABLE_MODEL_INFO_EXIF
+
+    char value[PROPERTY_VALUE_MAX];
+    if (property_get("ro.product.manufacturer", value, "QCOM-AA") > 0) {
+        exif->addEntry(EXIFTAGID_MAKE, EXIF_ASCII,
+                (uint32_t)(strlen(value) + 1), (void *)value);
+    } else {
+        ALOGE("%s: getExifMaker failed", __func__);
+    }
+
+    if (property_get("ro.product.model", value, "QCAM-AA") > 0) {
+        exif->addEntry(EXIFTAGID_MODEL, EXIF_ASCII,
+                (uint32_t)(strlen(value) + 1), (void *)value);
+    } else {
+        ALOGE("%s: getExifModel failed", __func__);
+    }
+
+    if (property_get("ro.build.description", value, "QCAM-AA") > 0) {
+        exif->addEntry(EXIFTAGID_SOFTWARE, EXIF_ASCII,
+                (uint32_t)(strlen(value) + 1), (void *)value);
+    } else {
+        ALOGE("%s: getExifSoftware failed", __func__);
+    }
+
+#endif
+
+    if (mParameters.useJpegExifRotation()) {
+        int16_t orientation;
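+        // Map the JPEG rotation in degrees to EXIF orientation tag values:
+        // 1 = 0 degrees, 6 = 90 CW, 3 = 180, 8 = 270 CW.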
+        switch (mParameters.getJpegExifRotation()) {
+        case 0:
+            orientation = 1;
+            break;
+        case 90:
+            orientation = 6;
+            break;
+        case 180:
+            orientation = 3;
+            break;
+        case 270:
+            orientation = 8;
+            break;
+        default:
+            orientation = 1;
+            break;
+        }
+        exif->addEntry(EXIFTAGID_ORIENTATION,
+                EXIF_SHORT,
+                1,
+                (void *)&orientation);
+        exif->addEntry(EXIFTAGID_TN_ORIENTATION,
+                EXIF_SHORT,
+                1,
+                (void *)&orientation);
+    }
+
+    pthread_mutex_unlock(&m_parm_lock);
+    return exif;
+}
+
+/*===========================================================================
+ * FUNCTION   : setHistogram
+ *
+ * DESCRIPTION: set if histogram should be enabled
+ *
+ * PARAMETERS :
+ *   @histogram_en : bool flag if histogram should be enabled
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::setHistogram(bool histogram_en)
+{
+    return mParameters.setHistogram(histogram_en);
+}
+
+/*===========================================================================
+ * FUNCTION   : setFaceDetection
+ *
+ * DESCRIPTION: set if face detection should be enabled
+ *
+ * PARAMETERS :
+ *   @enabled : bool flag if face detection should be enabled
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::setFaceDetection(bool enabled)
+{
+    return mParameters.setFaceDetection(enabled, true);
+}
+
+/*===========================================================================
+ * FUNCTION   : isCaptureShutterEnabled
+ *
+ * DESCRIPTION: Check whether shutter should be triggered immediately after
+ *              capture
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : true - trigger the shutter immediately after capture
+ *              false - do not trigger the shutter early
+ *==========================================================================*/
+bool QCamera2HardwareInterface::isCaptureShutterEnabled()
+{
+    char prop[PROPERTY_VALUE_MAX];
+    memset(prop, 0, sizeof(prop));
+    property_get("persist.camera.feature.shutter", prop, "0");
+    int enableShutter = atoi(prop);
+    return enableShutter == 1;
+}
+
+/*===========================================================================
+ * FUNCTION   : needProcessPreviewFrame
+ *
+ * DESCRIPTION: returns whether preview frame need to be displayed
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : true - preview frame needs to be processed and displayed
+ *              false - preview frame can be dropped
+ *==========================================================================*/
+bool QCamera2HardwareInterface::needProcessPreviewFrame()
+{
+    return m_stateMachine.isPreviewRunning()
+            && mParameters.isDisplayFrameNeeded();
+}
+
+/*===========================================================================
+ * FUNCTION   : prepareHardwareForSnapshot
+ *
+ * DESCRIPTION: prepare hardware for snapshot, such as LED
+ *
+ * PARAMETERS :
+ *   @afNeeded: flag indicating if Auto Focus needs to be done during preparation
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::prepareHardwareForSnapshot(int32_t afNeeded)
+{
+    ATRACE_CALL();
+    CDBG_HIGH("[KPI Perf] %s: Prepare hardware such as LED",__func__);
+    return mCameraHandle->ops->prepare_snapshot(mCameraHandle->camera_handle,
+                                                afNeeded);
+}
+
+/*===========================================================================
+ * FUNCTION   : needFDMetadata
+ *
+ * DESCRIPTION: check whether we need to process Face Detection metadata in this channel
+ *
+ * PARAMETERS :
+ *   @channel_type: channel type
+ *
+ * RETURN     : true: needed
+ *              false: no need
+ *==========================================================================*/
+bool QCamera2HardwareInterface::needFDMetadata(qcamera_ch_type_enum_t channel_type)
+{
+    //Note: Currently we only process ZSL channel
+    bool value = false;
+    if(channel_type == QCAMERA_CH_TYPE_ZSL){
+        //check if FD requirement is enabled
+        if(mParameters.isSnapshotFDNeeded() &&
+           mParameters.isFaceDetectionEnabled()){
+            value = true;
+            CDBG_HIGH("%s: Face Detection metadata is required in ZSL mode.", __func__);
+        }
+    }
+
+    return value;
+}
+
+/*===========================================================================
+ * FUNCTION   : defferedWorkRoutine
+ *
+ * DESCRIPTION: worker thread routine that executes deferred tasks
+ *
+ * PARAMETERS :
+ *   @data    : user data ptr (QCamera2HardwareInterface)
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void *QCamera2HardwareInterface::defferedWorkRoutine(void *obj)
+{
+    int running = 1;
+    int ret;
+    uint8_t is_active = FALSE;
+
+    QCamera2HardwareInterface *pme = (QCamera2HardwareInterface *)obj;
+    QCameraCmdThread *cmdThread = &pme->mDefferedWorkThread;
+    cmdThread->setName("CAM_defrdWrk");
+
+    do {
+        do {
+            ret = cam_sem_wait(&cmdThread->cmd_sem);
+            if (ret != 0 && errno != EINVAL) {
+                ALOGE("%s: cam_sem_wait error (%s)",
+                        __func__, strerror(errno));
+                return NULL;
+            }
+        } while (ret != 0);
+
+        // we got notified about new cmd avail in cmd queue
+        camera_cmd_type_t cmd = cmdThread->getCmd();
+        switch (cmd) {
+        case CAMERA_CMD_TYPE_START_DATA_PROC:
+            CDBG_HIGH("%s: start data proc", __func__);
+            is_active = TRUE;
+            break;
+        case CAMERA_CMD_TYPE_STOP_DATA_PROC:
+            CDBG_HIGH("%s: stop data proc", __func__);
+            is_active = FALSE;
+            // signal cmd is completed
+            cam_sem_post(&cmdThread->sync_sem);
+            break;
+        case CAMERA_CMD_TYPE_DO_NEXT_JOB:
+            {
+                DeffWork *dw =
+                    reinterpret_cast<DeffWork *>(pme->mCmdQueue.dequeue());
+
+                if ( NULL == dw ) {
+                    ALOGE("%s : Invalid deferred work", __func__);
+                    break;
+                }
+
+                switch( dw->cmd ) {
+                case CMD_DEFF_ALLOCATE_BUFF:
+                    {
+                        QCameraChannel * pChannel = dw->args.allocArgs.ch;
+
+                        if ( NULL == pChannel ) {
+                            ALOGE("%s : Invalid deferred work channel",
+                                    __func__);
+                            break;
+                        }
+
+                        cam_stream_type_t streamType = dw->args.allocArgs.type;
+                        CDBG_HIGH("%s: Deffered buffer allocation started for stream type: %d",
+                                __func__, streamType);
+
+                        uint32_t iNumOfStreams = pChannel->getNumOfStreams();
+                        QCameraStream *pStream = NULL;
+                        for ( uint32_t i = 0; i < iNumOfStreams; ++i) {
+                            pStream = pChannel->getStreamByIndex(i);
+
+                            if ( NULL == pStream ) {
+                                break;
+                            }
+
+                            if ( pStream->isTypeOf(streamType)) {
+                                if ( pStream->allocateBuffers() ) {
+                                    ALOGE("%s: Error allocating buffers !!!",
+                                            __func__);
+                                }
+                                break;
+                            }
+                        }
+                        {
+                            Mutex::Autolock l(pme->mDeffLock);
+                            pme->mDeffOngoingJobs[dw->id] = false;
+                            CDBG_HIGH("%s: Deffered buffer allocation done for stream type: %d",
+                                    __func__, streamType);
+                            delete dw;
+                            pme->mDeffCond.signal();
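+    // Select the histogram payload based on the stats type: the Gb channel
+    // for Bayer stats, the full YUV stats otherwise.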
+                        }
+
+                    }
+                    break;
+                case CMD_DEFF_PPROC_START:
+                    {
+                        QCameraChannel * pChannel = dw->args.pprocArgs;
+                        assert(pChannel);
+
+                        if (pme->m_postprocessor.start(pChannel) != NO_ERROR) {
+                            ALOGE("%s: cannot start postprocessor", __func__);
+                            pme->delChannel(QCAMERA_CH_TYPE_CAPTURE);
+                        }
+                        {
+                            Mutex::Autolock l(pme->mDeffLock);
+                            pme->mDeffOngoingJobs[dw->id] = false;
+                            delete dw;
+                            pme->mDeffCond.broadcast();
+                        }
+                    }
+                    break;
+                default:
+                    ALOGE("%s[%d]:  Incorrect command : %d",
+                            __func__,
+                            __LINE__,
+                            dw->cmd);
+                }
+            }
+            break;
+        case CAMERA_CMD_TYPE_EXIT:
+            running = 0;
+            break;
+        default:
+            break;
+        }
+    } while (running);
+
+    return NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : queueDefferedWork
+ *
+ * DESCRIPTION: function which queues deferred tasks
+ *
+ * PARAMETERS :
+ *   @cmd     : deferred task
+ *   @args    : deferred task arguments
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::queueDefferedWork(DefferedWorkCmd cmd,
+                                                     DefferWorkArgs args)
+{
+    Mutex::Autolock l(mDeffLock);
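+    // Find the first free job slot; its index doubles as the job id returned
+    // to the caller and later passed to waitDefferedWork().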
+    for (uint32_t i = 0; i < MAX_ONGOING_JOBS; ++i) {
+        if (!mDeffOngoingJobs[i]) {
+            DeffWork *dw = new DeffWork(cmd, i, args);
+            if (mCmdQueue.enqueue(dw)) {
+                mDeffOngoingJobs[i] = true;
+                mDefferedWorkThread.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB,
+                        FALSE,
+                        FALSE);
+                return (int32_t)i;
+            } else {
+                CDBG("%s: Command queue not active! cmd = %d", __func__, cmd);
+                delete dw;
+                return -1;
+            }
+        }
+    }
+    return -1;
+}
+
+/*===========================================================================
+ * FUNCTION   : waitDefferedWork
+ *
+ * DESCRIPTION: waits for a deferred task to finish
+ *
+ * PARAMETERS :
+ *   @job_id  : deferred task id
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::waitDefferedWork(int32_t &job_id)
+{
+    Mutex::Autolock l(mDeffLock);
+
+    if ((MAX_ONGOING_JOBS <= job_id) || (0 > job_id)) {
+        return NO_ERROR;
+    }
+
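+    // Block until the deferred work thread clears this job slot and signals
+    // mDeffCond.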
+    while ( mDeffOngoingJobs[job_id] == true ) {
+        mDeffCond.wait(mDeffLock);
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : isRegularCapture
+ *
+ * DESCRIPTION: Check configuration for regular capture
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : true - regular capture
+ *              false - other type of capture
+ *==========================================================================*/
+bool QCamera2HardwareInterface::isRegularCapture()
+{
+    bool ret = false;
+
+    if (numOfSnapshotsExpected() == 1 &&
+        !isLongshotEnabled() &&
+        !mParameters.isHDREnabled() &&
+        !mParameters.getRecordingHintValue() &&
+        !isZSLMode() && !mParameters.getofflineRAW()) {
+            ret = true;
+    }
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : getLogLevel
+ *
+ * DESCRIPTION: Reads the log level property into a variable
+ *
+ * PARAMETERS :
+ *   None
+ *
+ * RETURN     :
+ *   None
+ *==========================================================================*/
+void QCamera2HardwareInterface::getLogLevel()
+{
+    char prop[PROPERTY_VALUE_MAX];
+    uint32_t globalLogLevel = 0;
+
+    property_get("persist.camera.hal.debug", prop, "0");
+    int val = atoi(prop);
+    if (0 <= val) {
+        gCamHalLogLevel = (uint32_t)val;
+    }
+    property_get("persist.camera.global.debug", prop, "0");
+    val = atoi(prop);
+    if (0 <= val) {
+        globalLogLevel = (uint32_t)val;
+    }
+
+    /* Highest log level among hal.logs and global.logs is selected */
+    if (gCamHalLogLevel < globalLogLevel)
+        gCamHalLogLevel = globalLogLevel;
+
+    return;
+}
+
+/*===========================================================================
+ * FUNCTION   : getSensorType
+ *
+ * DESCRIPTION: Returns the type of sensor being used, whether YUV or Bayer
+ *
+ * PARAMETERS :
+ *   None
+ *
+ * RETURN     : Type of sensor - bayer or YUV
+ *
+ *==========================================================================*/
+cam_sensor_t QCamera2HardwareInterface::getSensorType()
+{
+    return gCamCaps[mCameraId]->sensor_type.sens_type;
+}
+
+}; // namespace qcamera
diff --git a/camera/QCamera2/HAL/QCamera2HWI.h b/camera/QCamera2/HAL/QCamera2HWI.h
new file mode 100644
index 0000000..5a6a841
--- /dev/null
+++ b/camera/QCamera2/HAL/QCamera2HWI.h
@@ -0,0 +1,644 @@
+/* Copyright (c) 2012-2015, The Linux Foundataion. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCAMERA2HARDWAREINTERFACE_H__
+#define __QCAMERA2HARDWAREINTERFACE_H__
+
+#include <hardware/camera.h>
+#include <utils/Log.h>
+#include <utils/Mutex.h>
+#include <utils/Condition.h>
+#include <QCameraParameters.h>
+
+#include "QCameraQueue.h"
+#include "QCameraCmdThread.h"
+#include "QCameraChannel.h"
+#include "QCameraStream.h"
+#include "QCameraStateMachine.h"
+#include "QCameraAllocator.h"
+#include "QCameraPostProc.h"
+#include "QCameraThermalAdapter.h"
+#include "QCameraMem.h"
+#include "QCameraPerf.h"
+
+extern "C" {
+#include <mm_camera_interface.h>
+#include <mm_jpeg_interface.h>
+}
+
+#if DISABLE_DEBUG_LOG
+
+inline void __null_log(int, const char *, const char *, ...) {}
+
+#ifdef ALOGD
+#undef ALOGD
+#define ALOGD(...) do { __null_log(0, LOG_TAG,__VA_ARGS__); } while (0)
+#endif
+
+#ifdef ALOGI
+#undef ALOGI
+#define ALOGI(...) do { __null_log(0, LOG_TAG,__VA_ARGS__); } while (0)
+#endif
+
+#ifdef CDBG
+#undef CDBG
+#define CDBG(...) do{} while(0)
+#endif
+
+#else
+
+
+#ifdef CDBG
+#undef CDBG
+#endif //#ifdef CDBG
+#define CDBG(fmt, args...) ALOGD_IF(gCamHalLogLevel >= 2, fmt, ##args)
+
+#ifdef CDBG_HIGH
+#undef CDBG_HIGH
+#endif //#ifdef CDBG_HIGH
+#define CDBG_HIGH(fmt, args...) ALOGD_IF(gCamHalLogLevel >= 1, fmt, ##args)
+
+#endif // DISABLE_DEBUG_LOG
+
+namespace qcamera {
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+typedef enum {
+    QCAMERA_CH_TYPE_ZSL,
+    QCAMERA_CH_TYPE_CAPTURE,
+    QCAMERA_CH_TYPE_PREVIEW,
+    QCAMERA_CH_TYPE_VIDEO,
+    QCAMERA_CH_TYPE_SNAPSHOT,
+    QCAMERA_CH_TYPE_RAW,
+    QCAMERA_CH_TYPE_METADATA,
+    QCAMERA_CH_TYPE_ANALYSIS,
+    QCAMERA_CH_TYPE_MAX
+} qcamera_ch_type_enum_t;
+
+typedef struct {
+    int32_t msg_type;
+    int32_t ext1;
+    int32_t ext2;
+} qcamera_evt_argm_t;
+
+#define QCAMERA_DUMP_FRM_PREVIEW    1
+#define QCAMERA_DUMP_FRM_VIDEO      (1<<1)
+#define QCAMERA_DUMP_FRM_SNAPSHOT   (1<<2)
+#define QCAMERA_DUMP_FRM_THUMBNAIL  (1<<3)
+#define QCAMERA_DUMP_FRM_RAW        (1<<4)
+#define QCAMERA_DUMP_FRM_JPEG       (1<<5)
+
+#define QCAMERA_DUMP_FRM_MASK_ALL    0x000000ff
+
+#define QCAMERA_ION_USE_CACHE   true
+#define QCAMERA_ION_USE_NOCACHE false
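+// Maximum number of deferred-work jobs that can be outstanding at once (sized for mDeffOngoingJobs)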
+#define MAX_ONGOING_JOBS 25
+
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+extern volatile uint32_t gCamHalLogLevel;
+
+typedef enum {
+    QCAMERA_NOTIFY_CALLBACK,
+    QCAMERA_DATA_CALLBACK,
+    QCAMERA_DATA_TIMESTAMP_CALLBACK,
+    QCAMERA_DATA_SNAPSHOT_CALLBACK
+} qcamera_callback_type_m;
+
+typedef void (*camera_release_callback)(void *user_data,
+                                        void *cookie,
+                                        int32_t cb_status);
+
+typedef struct {
+    qcamera_callback_type_m  cb_type;    // event type
+    int32_t                  msg_type;   // msg type
+    int32_t                  ext1;       // extended parameter
+    int32_t                  ext2;       // extended parameter
+    camera_memory_t *        data;       // ptr to data memory struct
+    unsigned int             index;      // index of the buf in the whole buffer
+    int64_t                  timestamp;  // buffer timestamp
+    camera_frame_metadata_t *metadata;   // meta data
+    void                    *user_data;  // any data needs to be released after callback
+    void                    *cookie;     // release callback cookie
+    camera_release_callback  release_cb; // release callback
+} qcamera_callback_argm_t;
+
+class QCameraCbNotifier {
+public:
+    QCameraCbNotifier(QCamera2HardwareInterface *parent) :
+                          mNotifyCb (NULL),
+                          mDataCb (NULL),
+                          mDataCbTimestamp (NULL),
+                          mCallbackCookie (NULL),
+                          mParent (parent),
+                          mDataQ(releaseNotifications, this),
+                          mActive(false){}
+
+    virtual ~QCameraCbNotifier();
+
+    virtual int32_t notifyCallback(qcamera_callback_argm_t &cbArgs);
+    virtual void setCallbacks(camera_notify_callback notifyCb,
+                              camera_data_callback dataCb,
+                              camera_data_timestamp_callback dataCbTimestamp,
+                              void *callbackCookie);
+    virtual int32_t startSnapshots();
+    virtual void stopSnapshots();
+    virtual void exit();
+    static void * cbNotifyRoutine(void * data);
+    static void releaseNotifications(void *data, void *user_data);
+    static bool matchSnapshotNotifications(void *data, void *user_data);
+    static bool matchPreviewNotifications(void *data, void *user_data);
+    virtual int32_t flushPreviewNotifications();
+private:
+
+    camera_notify_callback         mNotifyCb;
+    camera_data_callback           mDataCb;
+    camera_data_timestamp_callback mDataCbTimestamp;
+    void                          *mCallbackCookie;
+    QCamera2HardwareInterface     *mParent;
+
+    QCameraQueue     mDataQ;
+    QCameraCmdThread mProcTh;
+    bool             mActive;
+};
+class QCamera2HardwareInterface : public QCameraAllocator,
+        public QCameraThermalCallback, public QCameraAdjustFPS
+{
+public:
+    /* static variable and functions accessed by camera service */
+    static camera_device_ops_t mCameraOps;
+
+    static int set_preview_window(struct camera_device *,
+        struct preview_stream_ops *window);
+    static void set_CallBacks(struct camera_device *,
+        camera_notify_callback notify_cb,
+        camera_data_callback data_cb,
+        camera_data_timestamp_callback data_cb_timestamp,
+        camera_request_memory get_memory,
+        void *user);
+    static void enable_msg_type(struct camera_device *, int32_t msg_type);
+    static void disable_msg_type(struct camera_device *, int32_t msg_type);
+    static int msg_type_enabled(struct camera_device *, int32_t msg_type);
+    static int start_preview(struct camera_device *);
+    static void stop_preview(struct camera_device *);
+    static int preview_enabled(struct camera_device *);
+    static int store_meta_data_in_buffers(struct camera_device *, int enable);
+    static int start_recording(struct camera_device *);
+    static void stop_recording(struct camera_device *);
+    static int recording_enabled(struct camera_device *);
+    static void release_recording_frame(struct camera_device *, const void *opaque);
+    static int auto_focus(struct camera_device *);
+    static int cancel_auto_focus(struct camera_device *);
+    static int take_picture(struct camera_device *);
+    int takeLiveSnapshot_internal();
+    int takeBackendPic_internal(bool *JpegMemOpt, char *raw_format);
+    void clearIntPendingEvents();
+    void checkIntPicPending(bool JpegMemOpt, char *raw_format);
+    static int cancel_picture(struct camera_device *);
+    static int set_parameters(struct camera_device *, const char *parms);
+    static char* get_parameters(struct camera_device *);
+    static void put_parameters(struct camera_device *, char *);
+    static int send_command(struct camera_device *,
+              int32_t cmd, int32_t arg1, int32_t arg2);
+    static void release(struct camera_device *);
+    static int dump(struct camera_device *, int fd);
+    static int close_camera_device(hw_device_t *);
+
+    static int register_face_image(struct camera_device *,
+                                   void *img_ptr,
+                                   cam_pp_offline_src_config_t *config);
+public:
+    QCamera2HardwareInterface(uint32_t cameraId);
+    virtual ~QCamera2HardwareInterface();
+    int openCamera(struct hw_device_t **hw_device);
+
+    static int getCapabilities(uint32_t cameraId, struct camera_info *info);
+    static int initCapabilities(uint32_t cameraId, mm_camera_vtbl_t *cameraHandle);
+    cam_capability_t *getCamHalCapabilities();
+
+    // Implementation of QCameraAllocator
+    virtual QCameraMemory *allocateStreamBuf(cam_stream_type_t stream_type,
+            size_t size, int stride, int scanline, uint8_t &bufferCnt);
+    virtual int32_t allocateMoreStreamBuf(QCameraMemory *mem_obj,
+            size_t size, uint8_t &bufferCnt);
+
+    virtual QCameraHeapMemory *allocateStreamInfoBuf(cam_stream_type_t stream_type);
+    virtual QCameraHeapMemory *allocateMiscBuf(cam_stream_info_t *streamInfo);
+    virtual QCameraMemory *allocateStreamUserBuf(cam_stream_info_t *streamInfo);
+
+    // Implementation of QCameraThermalCallback
+    virtual int thermalEvtHandle(qcamera_thermal_level_enum_t *level,
+            void *userdata, void *data);
+
+    virtual int recalcFPSRange(int &minFPS, int &maxFPS,
+            cam_fps_range_t &adjustedRange);
+
+    friend class QCameraStateMachine;
+    friend class QCameraPostProcessor;
+    friend class QCameraCbNotifier;
+
+private:
+    int setPreviewWindow(struct preview_stream_ops *window);
+    int setCallBacks(
+        camera_notify_callback notify_cb,
+        camera_data_callback data_cb,
+        camera_data_timestamp_callback data_cb_timestamp,
+        camera_request_memory get_memory,
+        void *user);
+    int enableMsgType(int32_t msg_type);
+    int disableMsgType(int32_t msg_type);
+    int msgTypeEnabled(int32_t msg_type);
+    int msgTypeEnabledWithLock(int32_t msg_type);
+    int startPreview();
+    int stopPreview();
+    int storeMetaDataInBuffers(int enable);
+    int startRecording();
+    int stopRecording();
+    int releaseRecordingFrame(const void *opaque);
+    int autoFocus();
+    int cancelAutoFocus();
+    int takePicture();
+    int stopCaptureChannel(bool destroy);
+    int cancelPicture();
+    int takeLiveSnapshot();
+    int takePictureInternal();
+    int cancelLiveSnapshot();
+    char* getParameters();
+    int putParameters(char *);
+    int sendCommand(int32_t cmd, int32_t &arg1, int32_t &arg2);
+    int release();
+    int dump(int fd);
+    int registerFaceImage(void *img_ptr,
+                          cam_pp_offline_src_config_t *config,
+                          int32_t &faceID);
+    int32_t longShot();
+
+    int openCamera();
+    int closeCamera();
+
+    int processAPI(qcamera_sm_evt_enum_t api, void *api_payload);
+    int processEvt(qcamera_sm_evt_enum_t evt, void *evt_payload);
+    int processSyncEvt(qcamera_sm_evt_enum_t evt, void *evt_payload);
+    void lockAPI();
+    void waitAPIResult(qcamera_sm_evt_enum_t api_evt, qcamera_api_result_t *apiResult);
+    void unlockAPI();
+    void signalAPIResult(qcamera_api_result_t *result);
+    void signalEvtResult(qcamera_api_result_t *result);
+
+    int calcThermalLevel(qcamera_thermal_level_enum_t level,
+            const int minFPSi, const int maxFPSi, cam_fps_range_t &adjustedRange,
+            enum msm_vfe_frame_skip_pattern &skipPattern);
+    int updateThermalLevel(void *level);
+
+    // update entries to set parameters and check if restart is needed
+    int updateParameters(const char *parms, bool &needRestart);
+    // send request to server to set parameters
+    int commitParameterChanges();
+
+    bool isCaptureShutterEnabled();
+    bool needDebugFps();
+    bool isRegularCapture();
+    bool isCACEnabled();
+    bool is4k2kResolution(cam_dimension_t* resolution);
+    bool isAFRunning();
+    bool isPreviewRestartEnabled();
+    bool needReprocess();
+    bool needRotationReprocess();
+    bool needScaleReprocess();
+    void debugShowVideoFPS();
+    void debugShowPreviewFPS();
+    void dumpJpegToFile(const void *data, size_t size, uint32_t index);
+    void dumpFrameToFile(QCameraStream *stream,
+            mm_camera_buf_def_t *frame, uint32_t dump_type);
+    void dumpMetadataToFile(QCameraStream *stream,
+                            mm_camera_buf_def_t *frame,char *type);
+    void releaseSuperBuf(mm_camera_super_buf_t *super_buf);
+    void playShutter();
+    void getThumbnailSize(cam_dimension_t &dim);
+    uint32_t getJpegQuality();
+    inline bool getCancelAutoFocus(){ return mCancelAutoFocus; }
+    inline void setCancelAutoFocus(bool flag){ mCancelAutoFocus = flag; }
+    QCameraExif *getExifData();
+    cam_sensor_t getSensorType();
+
+    int32_t processAutoFocusEvent(cam_auto_focus_data_t &focus_data);
+    int32_t processZoomEvent(cam_crop_data_t &crop_info);
+    int32_t processPrepSnapshotDoneEvent(cam_prep_snapshot_state_t prep_snapshot_state);
+    int32_t processASDUpdate(cam_auto_scene_t scene);
+    int32_t processJpegNotify(qcamera_jpeg_evt_payload_t *jpeg_job);
+    int32_t processHDRData(cam_asd_hdr_scene_data_t hdr_scene);
+    int32_t processRetroAECUnlock();
+    int32_t processZSLCaptureDone();
+    int32_t processSceneData(cam_scene_mode_type scene);
+    int32_t transAwbMetaToParams(cam_awb_params_t &awb_params);
+    int32_t processFocusPositionInfo(cam_focus_pos_info_t &cur_pos_info);
+    int32_t processAEInfo(cam_3a_params_t &ae_params);
+
+    int32_t sendEvtNotify(int32_t msg_type, int32_t ext1, int32_t ext2);
+    int32_t sendDataNotify(int32_t msg_type,
+                           camera_memory_t *data,
+                           uint8_t index,
+                           camera_frame_metadata_t *metadata);
+
+    int32_t sendPreviewCallback(QCameraStream *stream,
+            QCameraGrallocMemory *memory, uint32_t idx);
+    int32_t selectScene(QCameraChannel *pChannel,
+            mm_camera_super_buf_t *recvd_frame);
+
+    int32_t addChannel(qcamera_ch_type_enum_t ch_type);
+    int32_t startChannel(qcamera_ch_type_enum_t ch_type);
+    int32_t stopChannel(qcamera_ch_type_enum_t ch_type);
+    int32_t delChannel(qcamera_ch_type_enum_t ch_type, bool destroy = true);
+    int32_t addPreviewChannel();
+    int32_t addSnapshotChannel();
+    int32_t addVideoChannel();
+    int32_t addZSLChannel();
+    int32_t addCaptureChannel();
+    int32_t addRawChannel();
+    int32_t addMetaDataChannel();
+    int32_t addAnalysisChannel();
+    QCameraReprocessChannel *addReprocChannel(QCameraChannel *pInputChannel);
+    QCameraReprocessChannel *addOfflineReprocChannel(
+                                                cam_pp_offline_src_config_t &img_config,
+                                                cam_pp_feature_config_t &pp_feature,
+                                                stream_cb_routine stream_cb,
+                                                void *userdata);
+    int32_t addStreamToChannel(QCameraChannel *pChannel,
+                               cam_stream_type_t streamType,
+                               stream_cb_routine streamCB,
+                               void *userData);
+    int32_t preparePreview();
+    void unpreparePreview();
+    int32_t prepareRawStream(QCameraChannel *pChannel);
+    QCameraChannel *getChannelByHandle(uint32_t channelHandle);
+    mm_camera_buf_def_t *getSnapshotFrame(mm_camera_super_buf_t *recvd_frame);
+    int32_t processFaceDetectionResult(cam_face_detection_data_t *fd_data);
+    int32_t processHistogramStats(cam_hist_stats_t &stats_data);
+    int32_t setHistogram(bool histogram_en);
+    int32_t setFaceDetection(bool enabled);
+    int32_t prepareHardwareForSnapshot(int32_t afNeeded);
+    bool needProcessPreviewFrame();
+    bool isNoDisplayMode() {return mParameters.isNoDisplayMode();};
+    bool isZSLMode() {return mParameters.isZSLMode();};
+    bool isRdiMode() {return mParameters.isRdiMode();};
+    uint8_t numOfSnapshotsExpected() {
+        return mParameters.getNumOfSnapshots();};
+    bool isSecureMode() {return mParameters.isSecureMode();};
+    bool isLongshotEnabled() { return mLongshotEnabled; };
+    bool isHFRMode() {return mParameters.isHfrMode();};
+    bool isLiveSnapshot() {return m_stateMachine.isRecording();};
+    void setRetroPicture(bool enable) { bRetroPicture = enable; };
+    bool isRetroPicture() {return bRetroPicture; };
+    bool isHDRMode() {return mParameters.isHDREnabled();};
+    uint8_t getBufNumRequired(cam_stream_type_t stream_type);
+    bool needFDMetadata(qcamera_ch_type_enum_t channel_type);
+    int32_t configureOnlineRotation(QCameraChannel &ch);
+    int32_t declareSnapshotStreams();
+    int32_t unconfigureAdvancedCapture();
+    int32_t configureAdvancedCapture();
+    int32_t configureAFBracketing(bool enable = true);
+    int32_t configureHDRBracketing();
+    int32_t stopAdvancedCapture(QCameraPicChannel *pChannel);
+    int32_t startAdvancedCapture(QCameraPicChannel *pChannel);
+    int32_t configureOptiZoom();
+    int32_t configureStillMore();
+    int32_t configureAEBracketing();
+    int32_t updatePostPreviewParameters();
+    inline void setOutputImageCount(uint32_t aCount) {mOutputCount = aCount;}
+    inline uint32_t getOutputImageCount() {return mOutputCount;}
+    bool processUFDumps(qcamera_jpeg_evt_payload_t *evt);
+    void captureDone();
+    int32_t updateMetadata(metadata_buffer_t *pMetaData);
+
+    int32_t getPPConfig(cam_pp_feature_config_t &pp_config, int curCount);
+    static void camEvtHandle(uint32_t camera_handle,
+                          mm_camera_event_t *evt,
+                          void *user_data);
+    static void jpegEvtHandle(jpeg_job_status_t status,
+                              uint32_t client_hdl,
+                              uint32_t jobId,
+                              mm_jpeg_output_t *p_buf,
+                              void *userdata);
+
+    static void *evtNotifyRoutine(void *data);
+
+    // functions for different data notify cb
+    static void zsl_channel_cb(mm_camera_super_buf_t *recvd_frame, void *userdata);
+    static void capture_channel_cb_routine(mm_camera_super_buf_t *recvd_frame,
+                                           void *userdata);
+    static void postproc_channel_cb_routine(mm_camera_super_buf_t *recvd_frame,
+                                            void *userdata);
+    static void rdi_mode_stream_cb_routine(mm_camera_super_buf_t *frame,
+                                              QCameraStream *stream,
+                                              void *userdata);
+    static void nodisplay_preview_stream_cb_routine(mm_camera_super_buf_t *frame,
+                                                    QCameraStream *stream,
+                                                    void *userdata);
+    static void preview_stream_cb_routine(mm_camera_super_buf_t *frame,
+                                          QCameraStream *stream,
+                                          void *userdata);
+    static void postview_stream_cb_routine(mm_camera_super_buf_t *frame,
+                                           QCameraStream *stream,
+                                           void *userdata);
+    static void video_stream_cb_routine(mm_camera_super_buf_t *frame,
+                                        QCameraStream *stream,
+                                        void *userdata);
+    static void snapshot_channel_cb_routine(mm_camera_super_buf_t *frame,
+           void *userdata);
+    static void raw_stream_cb_routine(mm_camera_super_buf_t *frame,
+                                      QCameraStream *stream,
+                                      void *userdata);
+    static void preview_raw_stream_cb_routine(mm_camera_super_buf_t * super_frame,
+                                              QCameraStream * stream,
+                                              void * userdata);
+    static void snapshot_raw_stream_cb_routine(mm_camera_super_buf_t * super_frame,
+                                               QCameraStream * stream,
+                                               void * userdata);
+    static void metadata_stream_cb_routine(mm_camera_super_buf_t *frame,
+                                           QCameraStream *stream,
+                                           void *userdata);
+    static void reprocess_stream_cb_routine(mm_camera_super_buf_t *frame,
+                                            QCameraStream *stream,
+                                            void *userdata);
+
+    static void releaseCameraMemory(void *data,
+                                    void *cookie,
+                                    int32_t cbStatus);
+    static void returnStreamBuffer(void *data,
+                                   void *cookie,
+                                   int32_t cbStatus);
+    static void getLogLevel();
+
+private:
+    camera_device_t   mCameraDevice;
+    uint32_t          mCameraId;
+    mm_camera_vtbl_t *mCameraHandle;
+    bool mCameraOpened;
+
+    preview_stream_ops_t *mPreviewWindow;
+    QCameraParameters mParameters;
+    int32_t               mMsgEnabled;
+    int                   mStoreMetaDataInFrame;
+
+    camera_notify_callback         mNotifyCb;
+    camera_data_callback           mDataCb;
+    camera_data_timestamp_callback mDataCbTimestamp;
+    camera_request_memory          mGetMemory;
+    void                          *mCallbackCookie;
+
+    QCameraStateMachine m_stateMachine;   // state machine
+    bool m_smThreadActive;
+    QCameraPostProcessor m_postprocessor; // post processor
+    QCameraThermalAdapter &m_thermalAdapter;
+    QCameraCbNotifier m_cbNotifier;
+    QCameraPerfLock m_perfLock;
+    pthread_mutex_t m_lock;
+    pthread_cond_t m_cond;
+    api_result_list *m_apiResultList;
+    QCameraMemoryPool m_memoryPool;
+
+    pthread_mutex_t m_evtLock;
+    pthread_cond_t m_evtCond;
+    qcamera_api_result_t m_evtResult;
+
+    pthread_mutex_t m_parm_lock;
+
+    QCameraChannel *m_channels[QCAMERA_CH_TYPE_MAX]; // array holding channel ptr
+
+    bool m_bPreviewStarted;             //flag indicates first preview frame callback is received
+    bool m_bRecordStarted;             //flag indicates Recording is started for first time
+
+    // Signifies if ZSL Retro Snapshots are enabled
+    bool bRetroPicture;
+    // Signifies AEC locked during zsl snapshots
+    bool m_bLedAfAecLock;
+    cam_autofocus_state_t m_currentFocusState;
+
+    uint32_t mDumpFrmCnt;  // frame dump count
+    uint32_t mDumpSkipCnt; // frame skip count
+    mm_jpeg_exif_params_t mExifParams;
+    qcamera_thermal_level_enum_t mThermalLevel;
+    bool mCancelAutoFocus;
+    bool m_HDRSceneEnabled;
+    bool mLongshotEnabled;
+
+    int32_t m_max_pic_width;
+    int32_t m_max_pic_height;
+    pthread_t mLiveSnapshotThread;
+    pthread_t mIntPicThread;
+    bool mFlashNeeded;
+    uint32_t mDeviceRotation;
+    uint32_t mCaptureRotation;
+    uint32_t mJpegExifRotation;
+    bool mUseJpegExifRotation;
+    bool mIs3ALocked;
+    bool mPrepSnapRun;
+    int32_t mZoomLevel;
+
+    int mVFrameCount;
+    int mVLastFrameCount;
+    nsecs_t mVLastFpsTime;
+    double mVFps;
+    int mPFrameCount;
+    int mPLastFrameCount;
+    nsecs_t mPLastFpsTime;
+    double mPFps;
+
+    //eztune variables for communication with eztune server at backend
+    bool m_bIntJpegEvtPending;
+    bool m_bIntRawEvtPending;
+    char m_BackendFileName[QCAMERA_MAX_FILEPATH_LENGTH];
+    size_t mBackendFileSize;
+    pthread_mutex_t m_int_lock;
+    pthread_cond_t m_int_cond;
+
+    enum DefferedWorkCmd {
+        CMD_DEFF_ALLOCATE_BUFF,
+        CMD_DEFF_PPROC_START,
+        CMD_DEFF_MAX
+    };
+
+    typedef struct {
+        QCameraChannel *ch;
+        cam_stream_type_t type;
+    } DefferAllocBuffArgs;
+
+    typedef union {
+        DefferAllocBuffArgs allocArgs;
+        QCameraChannel *pprocArgs;
+    } DefferWorkArgs;
+
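+    // One entry per job slot; true while the job with that index is still pending
+    // on the deferred work thread.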
+    bool mDeffOngoingJobs[MAX_ONGOING_JOBS];
+
+    struct DeffWork
+    {
+        DeffWork(DefferedWorkCmd cmd_,
+                 uint32_t id_,
+                 DefferWorkArgs args_)
+            : cmd(cmd_),
+              id(id_),
+              args(args_){};
+
+        DefferedWorkCmd cmd;
+        uint32_t id;
+        DefferWorkArgs args;
+    };
+
+    QCameraCmdThread      mDefferedWorkThread;
+    QCameraQueue          mCmdQueue;
+
+    Mutex                 mDeffLock;
+    Condition             mDeffCond;
+
+    int32_t queueDefferedWork(DefferedWorkCmd cmd,
+                              DefferWorkArgs args);
+    int32_t waitDefferedWork(int32_t &job_id);
+    static void *defferedWorkRoutine(void *obj);
+
+    int32_t mSnapshotJob;
+    int32_t mPostviewJob;
+    int32_t mMetadataJob;
+    int32_t mReprocJob;
+    int32_t mRawdataJob;
+    uint32_t mOutputCount;
+    uint32_t mInputCount;
+    bool mAdvancedCaptureConfigured;
+    bool mHDRBracketingEnabled;
+};
+
+}; // namespace qcamera
+
+#endif /* __QCAMERA2HARDWAREINTERFACE_H__ */
diff --git a/camera/QCamera2/HAL/QCamera2HWICallbacks.cpp b/camera/QCamera2/HAL/QCamera2HWICallbacks.cpp
new file mode 100644
index 0000000..c73ff13
--- /dev/null
+++ b/camera/QCamera2/HAL/QCamera2HWICallbacks.cpp
@@ -0,0 +1,2672 @@
+/* Copyright (c) 2012-2015, The Linux Foundataion. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#define LOG_TAG "QCamera2HWI"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+
+#include <time.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <utils/Errors.h>
+#include <utils/Trace.h>
+#include <utils/Timers.h>
+#include <QComOMXMetadata.h>
+#include "QCamera2HWI.h"
+
+namespace qcamera {
+
+/*===========================================================================
+ * FUNCTION   : zsl_channel_cb
+ *
+ * DESCRIPTION: helper function to handle ZSL superbuf callback directly from
+ *              mm-camera-interface
+ *
+ * PARAMETERS :
+ *   @recvd_frame : received super buffer
+ *   @userdata    : user data ptr
+ *
+ * RETURN    : None
+ *
+ * NOTE      : recvd_frame is released by the caller after this call returns,
+ *             so if any asynchronous work needs recvd_frame, it is our
+ *             responsibility to save a copy of it for later use.
+ *==========================================================================*/
+void QCamera2HardwareInterface::zsl_channel_cb(mm_camera_super_buf_t *recvd_frame,
+                                               void *userdata)
+{
+    ATRACE_CALL();
+    CDBG_HIGH("[KPI Perf] %s: E",__func__);
+    char value[PROPERTY_VALUE_MAX];
+    bool dump_raw = false;
+    bool dump_yuv = false;
+    bool log_matching = false;
+    QCamera2HardwareInterface *pme = (QCamera2HardwareInterface *)userdata;
+    if (pme == NULL ||
+        pme->mCameraHandle == NULL ||
+        pme->mCameraHandle->camera_handle != recvd_frame->camera_handle){
+       ALOGE("%s: camera obj not valid", __func__);
+       return;
+    }
+
+    QCameraChannel *pChannel = pme->m_channels[QCAMERA_CH_TYPE_ZSL];
+    if (pChannel == NULL ||
+        pChannel->getMyHandle() != recvd_frame->ch_id) {
+        ALOGE("%s: ZSL channel doesn't exist, return here", __func__);
+        return;
+    }
+
+    if(pme->mParameters.isSceneSelectionEnabled() &&
+            !pme->m_stateMachine.isCaptureRunning()) {
+        pme->selectScene(pChannel, recvd_frame);
+        pChannel->bufDone(recvd_frame);
+        return;
+    }
+
+    CDBG_HIGH("%s: [ZSL Retro] Frame CB Unlock : %d, is AEC Locked: %d",
+          __func__, recvd_frame->bUnlockAEC, pme->m_bLedAfAecLock);
+    if(recvd_frame->bUnlockAEC && pme->m_bLedAfAecLock) {
+        qcamera_sm_internal_evt_payload_t *payload =
+                (qcamera_sm_internal_evt_payload_t *)malloc(
+                        sizeof(qcamera_sm_internal_evt_payload_t));
+        if (NULL != payload) {
+            memset(payload, 0, sizeof(qcamera_sm_internal_evt_payload_t));
+            payload->evt_type = QCAMERA_INTERNAL_EVT_RETRO_AEC_UNLOCK;
+            int32_t rc = pme->processEvt(QCAMERA_SM_EVT_EVT_INTERNAL, payload);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: processEvt for retro AEC unlock failed", __func__);
+                free(payload);
+                payload = NULL;
+            }
+        } else {
+            ALOGE("%s: No memory for retro AEC event", __func__);
+        }
+    }
+
+    // Check if retro-active frames are completed and camera is
+    // ready to go ahead with LED estimation for regular frames
+    if (recvd_frame->bReadyForPrepareSnapshot) {
+      // Send an event
+      CDBG_HIGH("%s: [ZSL Retro] Ready for Prepare Snapshot, signal ", __func__);
+      qcamera_sm_internal_evt_payload_t *payload =
+         (qcamera_sm_internal_evt_payload_t *)malloc(sizeof(qcamera_sm_internal_evt_payload_t));
+      if (NULL != payload) {
+        memset(payload, 0, sizeof(qcamera_sm_internal_evt_payload_t));
+        payload->evt_type = QCAMERA_INTERNAL_EVT_READY_FOR_SNAPSHOT;
+        int32_t rc = pme->processEvt(QCAMERA_SM_EVT_EVT_INTERNAL, payload);
+        if (rc != NO_ERROR) {
+          ALOGE("%s: processEvt Ready for Snaphot failed", __func__);
+          free(payload);
+          payload = NULL;
+        }
+      } else {
+        ALOGE("%s: No memory for prepare signal event detect"
+              " qcamera_sm_internal_evt_payload_t", __func__);
+      }
+    }
+
+    /* indicate the parent that capture is done */
+    pme->captureDone();
+
+    // save a copy for the superbuf
+    mm_camera_super_buf_t* frame =
+               (mm_camera_super_buf_t *)malloc(sizeof(mm_camera_super_buf_t));
+    if (frame == NULL) {
+        ALOGE("%s: Error allocating memory to save received_frame structure.", __func__);
+        pChannel->bufDone(recvd_frame);
+        return;
+    }
+    *frame = *recvd_frame;
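+    // Shallow copy of the superbuf descriptor only; the underlying stream buffers
+    // are returned later through the postprocessor / bufDone path.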
+
+    if (recvd_frame->num_bufs > 0) {
+        ALOGI("[KPI Perf] %s: superbuf frame_idx %d", __func__,
+            recvd_frame->bufs[0]->frame_idx);
+    }
+
+    // DUMP RAW if available
+    property_get("persist.camera.zsl_raw", value, "0");
+    dump_raw = atoi(value) > 0 ? true : false;
+    if (dump_raw) {
+        for (uint32_t i = 0; i < recvd_frame->num_bufs; i++) {
+            if (recvd_frame->bufs[i]->stream_type == CAM_STREAM_TYPE_RAW) {
+                mm_camera_buf_def_t * raw_frame = recvd_frame->bufs[i];
+                QCameraStream *pStream = pChannel->getStreamByHandle(raw_frame->stream_id);
+                if (NULL != pStream) {
+                    pme->dumpFrameToFile(pStream, raw_frame, QCAMERA_DUMP_FRM_RAW);
+                }
+                break;
+            }
+        }
+    }
+
+    // DUMP YUV before reprocess if needed
+    property_get("persist.camera.zsl_yuv", value, "0");
+    dump_yuv = atoi(value) > 0 ? true : false;
+    if (dump_yuv) {
+        for (uint32_t i = 0; i < recvd_frame->num_bufs; i++) {
+            if (recvd_frame->bufs[i]->stream_type == CAM_STREAM_TYPE_SNAPSHOT) {
+                mm_camera_buf_def_t * yuv_frame = recvd_frame->bufs[i];
+                QCameraStream *pStream = pChannel->getStreamByHandle(yuv_frame->stream_id);
+                if (NULL != pStream) {
+                    pme->dumpFrameToFile(pStream, yuv_frame, QCAMERA_DUMP_FRM_SNAPSHOT);
+                }
+                break;
+            }
+        }
+    }
+    // Check whether FD metadata is needed along with the snapshot frame in ZSL mode
+    if(pme->needFDMetadata(QCAMERA_CH_TYPE_ZSL)){
+        //Need Face Detection result for snapshot frames
+        //Get the Meta Data frames
+        mm_camera_buf_def_t *pMetaFrame = NULL;
+        for (uint32_t i = 0; i < frame->num_bufs; i++) {
+            QCameraStream *pStream = pChannel->getStreamByHandle(frame->bufs[i]->stream_id);
+            if (pStream != NULL) {
+                if (pStream->isTypeOf(CAM_STREAM_TYPE_METADATA)) {
+                    pMetaFrame = frame->bufs[i]; //find the metadata
+                    break;
+                }
+            }
+        }
+
+        if(pMetaFrame != NULL){
+            metadata_buffer_t *pMetaData = (metadata_buffer_t *)pMetaFrame->buffer;
+            //send the face detection info
+            uint8_t found = 0;
+            cam_face_detection_data_t faces_data;
+            IF_META_AVAILABLE(cam_face_detection_data_t, p_faces_data,
+                    CAM_INTF_META_FACE_DETECTION, pMetaData) {
+                faces_data = *p_faces_data;
+                found = 1;
+            } else {
+                memset(&faces_data, 0, sizeof(cam_face_detection_data_t));
+            }
+            faces_data.fd_type = QCAMERA_FD_SNAPSHOT; //HARD CODE here before MCT can support
+            if(!found){
+                faces_data.num_faces_detected = 0;
+            }else if(faces_data.num_faces_detected > MAX_ROI){
+                ALOGE("%s: Invalid number of faces %d",
+                    __func__, faces_data.num_faces_detected);
+            }
+            qcamera_sm_internal_evt_payload_t *payload =
+                (qcamera_sm_internal_evt_payload_t *)malloc(sizeof(qcamera_sm_internal_evt_payload_t));
+            if (NULL != payload) {
+                memset(payload, 0, sizeof(qcamera_sm_internal_evt_payload_t));
+                payload->evt_type = QCAMERA_INTERNAL_EVT_FACE_DETECT_RESULT;
+                payload->faces_data = faces_data;
+                int32_t rc = pme->processEvt(QCAMERA_SM_EVT_EVT_INTERNAL, payload);
+                if (rc != NO_ERROR) {
+                    ALOGE("%s: processEvt face_detection_result failed", __func__);
+                    free(payload);
+                    payload = NULL;
+                }
+            } else {
+                ALOGE("%s: No memory for face_detection_result qcamera_sm_internal_evt_payload_t", __func__);
+            }
+        }
+    }
+
+    property_get("persist.camera.dumpmetadata", value, "0");
+    int32_t enabled = atoi(value);
+    if (enabled) {
+        mm_camera_buf_def_t *pMetaFrame = NULL;
+        QCameraStream *pStream = NULL;
+        for (uint32_t i = 0; i < frame->num_bufs; i++) {
+            pStream = pChannel->getStreamByHandle(frame->bufs[i]->stream_id);
+            if (pStream != NULL) {
+                if (pStream->isTypeOf(CAM_STREAM_TYPE_METADATA)) {
+                    pMetaFrame = frame->bufs[i];
+                    if (pMetaFrame != NULL &&
+                            ((metadata_buffer_t *)pMetaFrame->buffer)->is_tuning_params_valid) {
+                        pme->dumpMetadataToFile(pStream, pMetaFrame, (char *) "ZSL_Snapshot");
+                    }
+                    break;
+                }
+            }
+        }
+    }
+
+    property_get("persist.camera.zsl_matching", value, "0");
+    log_matching = atoi(value) > 0 ? true : false;
+    if (log_matching) {
+        CDBG_HIGH("%s : ZSL super buffer contains:", __func__);
+        QCameraStream *pStream = NULL;
+        for (uint32_t i = 0; i < frame->num_bufs; i++) {
+            pStream = pChannel->getStreamByHandle(frame->bufs[i]->stream_id);
+            if (pStream != NULL ) {
+                CDBG_HIGH("%s: Buffer with V4L index %d frame index %d of type %d Timestamp: %ld %ld ",
+                        __func__,
+                        frame->bufs[i]->buf_idx,
+                        frame->bufs[i]->frame_idx,
+                        pStream->getMyType(),
+                        frame->bufs[i]->ts.tv_sec,
+                        frame->bufs[i]->ts.tv_nsec);
+            }
+        }
+    }
+
+    // Wait on Postproc initialization if needed
+    pme->waitDefferedWork(pme->mReprocJob);
+
+    // send to postprocessor
+    pme->m_postprocessor.processData(frame);
+
+    CDBG_HIGH("[KPI Perf] %s: X", __func__);
+}
+
+/*===========================================================================
+ * FUNCTION   : selectScene
+ *
+ * DESCRIPTION: send a preview callback when a specific selected scene is applied
+ *
+ * PARAMETERS :
+ *   @pChannel: Camera channel
+ *   @frame   : Bundled super buffer
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::selectScene(QCameraChannel *pChannel,
+        mm_camera_super_buf_t *frame)
+{
+    mm_camera_buf_def_t *pMetaFrame = NULL;
+    QCameraStream *pStream = NULL;
+    int32_t rc = NO_ERROR;
+
+    if ((NULL == frame) || (NULL == pChannel)) {
+        ALOGE("%s: Invalid scene select input", __func__);
+        return BAD_VALUE;
+    }
+
+    cam_scene_mode_type selectedScene = mParameters.getSelectedScene();
+    if (CAM_SCENE_MODE_MAX == selectedScene) {
+        ALOGV("%s: No selected scene", __func__);
+        return NO_ERROR;
+    }
+
+    for (uint32_t i = 0; i < frame->num_bufs; i++) {
+        pStream = pChannel->getStreamByHandle(frame->bufs[i]->stream_id);
+        if (pStream != NULL) {
+            if (pStream->isTypeOf(CAM_STREAM_TYPE_METADATA)) {
+                pMetaFrame = frame->bufs[i];
+                break;
+            }
+        }
+    }
+
+    if (NULL == pMetaFrame) {
+        ALOGE("%s: No metadata buffer found in scene select super buffer", __func__);
+        return NO_INIT;
+    }
+
+    metadata_buffer_t *pMetaData = (metadata_buffer_t *)pMetaFrame->buffer;
+
+    IF_META_AVAILABLE(cam_scene_mode_type, scene, CAM_INTF_META_CURRENT_SCENE, pMetaData) {
+        if ((*scene == selectedScene) &&
+                (mDataCb != NULL) &&
+                (msgTypeEnabledWithLock(CAMERA_MSG_PREVIEW_FRAME) > 0)) {
+            mm_camera_buf_def_t *preview_frame = NULL;
+            for (uint32_t i = 0; i < frame->num_bufs; i++) {
+                pStream = pChannel->getStreamByHandle(frame->bufs[i]->stream_id);
+                if (pStream != NULL) {
+                    if (pStream->isTypeOf(CAM_STREAM_TYPE_PREVIEW)) {
+                        preview_frame = frame->bufs[i];
+                        break;
+                    }
+                }
+            }
+            if (preview_frame) {
+                QCameraGrallocMemory *memory = (QCameraGrallocMemory *)preview_frame->mem_info;
+                uint32_t idx = preview_frame->buf_idx;
+                rc = sendPreviewCallback(pStream, memory, idx);
+                if (NO_ERROR != rc) {
+                    ALOGE("%s: Error triggering scene select preview callback", __func__);
+                } else {
+                    mParameters.setSelectedScene(CAM_SCENE_MODE_MAX);
+                }
+            } else {
+                ALOGE("%s: No preview buffer found in scene select super buffer", __func__);
+                return NO_INIT;
+            }
+        }
+    } else {
+        ALOGE("%s: No current scene metadata!", __func__);
+        rc = NO_INIT;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : capture_channel_cb_routine
+ *
+ * DESCRIPTION: helper function to handle snapshot superbuf callback directly from
+ *              mm-camera-interface
+ *
+ * PARAMETERS :
+ *   @recvd_frame : received super buffer
+ *   @userdata    : user data ptr
+ *
+ * RETURN    : None
+ *
+ * NOTE      : recvd_frame is released by the caller after this call returns,
+ *             so if any asynchronous work needs recvd_frame, it is our
+ *             responsibility to save a copy of it for later use.
+ *==========================================================================*/
+void QCamera2HardwareInterface::capture_channel_cb_routine(mm_camera_super_buf_t *recvd_frame,
+                                                           void *userdata)
+{
+    ATRACE_CALL();
+    char value[PROPERTY_VALUE_MAX];
+    CDBG_HIGH("[KPI Perf] %s: E PROFILE_YUV_CB_TO_HAL", __func__);
+    QCamera2HardwareInterface *pme = (QCamera2HardwareInterface *)userdata;
+    if (pme == NULL ||
+        pme->mCameraHandle == NULL ||
+        pme->mCameraHandle->camera_handle != recvd_frame->camera_handle){
+        ALOGE("%s: camera obj not valid", __func__);
+        return;
+    }
+
+    QCameraChannel *pChannel = pme->m_channels[QCAMERA_CH_TYPE_CAPTURE];
+    if (pChannel == NULL ||
+        pChannel->getMyHandle() != recvd_frame->ch_id) {
+        ALOGE("%s: Capture channel doesn't exist, return here", __func__);
+        return;
+    }
+
+    // save a copy for the superbuf
+    mm_camera_super_buf_t* frame =
+               (mm_camera_super_buf_t *)malloc(sizeof(mm_camera_super_buf_t));
+    if (frame == NULL) {
+        ALOGE("%s: Error allocating memory to save received_frame structure.", __func__);
+        pChannel->bufDone(recvd_frame);
+        return;
+    }
+    *frame = *recvd_frame;
+
+    property_get("persist.camera.dumpmetadata", value, "0");
+    int32_t enabled = atoi(value);
+    if (enabled) {
+        mm_camera_buf_def_t *pMetaFrame = NULL;
+        QCameraStream *pStream = NULL;
+        for (uint32_t i = 0; i < frame->num_bufs; i++) {
+            pStream = pChannel->getStreamByHandle(frame->bufs[i]->stream_id);
+            if (pStream != NULL) {
+                if (pStream->isTypeOf(CAM_STREAM_TYPE_METADATA)) {
+                    pMetaFrame = frame->bufs[i]; //find the metadata
+                    if (pMetaFrame != NULL &&
+                            ((metadata_buffer_t *)pMetaFrame->buffer)->is_tuning_params_valid) {
+                        pme->dumpMetadataToFile(pStream, pMetaFrame, (char *) "Snapshot");
+                    }
+                    break;
+                }
+            }
+        }
+    }
+
+    // Wait on Postproc initialization if needed
+    pme->waitDefferedWork(pme->mReprocJob);
+
+    // send to postprocessor
+    pme->m_postprocessor.processData(frame);
+
+/* START of test register face image for face authentication */
+#ifdef QCOM_TEST_FACE_REGISTER_FACE
+    static uint8_t bRunFaceReg = 1;
+
+    if (bRunFaceReg > 0) {
+        // find snapshot frame
+        QCameraStream *main_stream = NULL;
+        mm_camera_buf_def_t *main_frame = NULL;
+        for (int i = 0; i < recvd_frame->num_bufs; i++) {
+            QCameraStream *pStream =
+                pChannel->getStreamByHandle(recvd_frame->bufs[i]->stream_id);
+            if (pStream != NULL) {
+                if (pStream->isTypeOf(CAM_STREAM_TYPE_SNAPSHOT)) {
+                    main_stream = pStream;
+                    main_frame = recvd_frame->bufs[i];
+                    break;
+                }
+            }
+        }
+        if (main_stream != NULL && main_frame != NULL) {
+            int32_t faceId = -1;
+            cam_pp_offline_src_config_t config;
+            memset(&config, 0, sizeof(cam_pp_offline_src_config_t));
+            config.num_of_bufs = 1;
+            main_stream->getFormat(config.input_fmt);
+            main_stream->getFrameDimension(config.input_dim);
+            main_stream->getFrameOffset(config.input_buf_planes.plane_info);
+            CDBG_HIGH("DEBUG: registerFaceImage E");
+            int32_t rc = pme->registerFaceImage(main_frame->buffer, &config, faceId);
+            CDBG_HIGH("DEBUG: registerFaceImage X, ret=%d, faceId=%d", rc, faceId);
+            bRunFaceReg = 0;
+        }
+    }
+
+#endif
+/* END of test register face image for face authentication */
+
+    CDBG_HIGH("[KPI Perf] %s: X", __func__);
+}
+
+/*===========================================================================
+ * FUNCTION   : postproc_channel_cb_routine
+ *
+ * DESCRIPTION: helper function to handle postprocess superbuf callback directly from
+ *              mm-camera-interface
+ *
+ * PARAMETERS :
+ *   @recvd_frame : received super buffer
+ *   @userdata    : user data ptr
+ *
+ * RETURN    : None
+ *
+ * NOTE      : recvd_frame is released by the caller after this call returns,
+ *             so if any asynchronous work needs recvd_frame, it is our
+ *             responsibility to save a copy of it for later use.
+ *==========================================================================*/
+void QCamera2HardwareInterface::postproc_channel_cb_routine(mm_camera_super_buf_t *recvd_frame,
+                                                            void *userdata)
+{
+    ATRACE_CALL();
+    CDBG_HIGH("[KPI Perf] %s: E", __func__);
+    QCamera2HardwareInterface *pme = (QCamera2HardwareInterface *)userdata;
+    if (pme == NULL ||
+        pme->mCameraHandle == NULL ||
+        pme->mCameraHandle->camera_handle != recvd_frame->camera_handle){
+        ALOGE("%s: camera obj not valid", __func__);
+        return;
+    }
+
+    // save a copy for the superbuf
+    mm_camera_super_buf_t* frame =
+               (mm_camera_super_buf_t *)malloc(sizeof(mm_camera_super_buf_t));
+    if (frame == NULL) {
+        ALOGE("%s: Error allocating memory to save received_frame structure.", __func__);
+        return;
+    }
+    *frame = *recvd_frame;
+
+    // send to postprocessor
+    pme->m_postprocessor.processPPData(frame);
+
+    ATRACE_INT("Camera:Reprocess", 0);
+    CDBG_HIGH("[KPI Perf] %s: X", __func__);
+}
+
+/*===========================================================================
+ * FUNCTION   : preview_stream_cb_routine
+ *
+ * DESCRIPTION: helper function to handle preview frame from preview stream in
+ *              normal case with display.
+ *
+ * PARAMETERS :
+ *   @super_frame : received super buffer
+ *   @stream      : stream object
+ *   @userdata    : user data ptr
+ *
+ * RETURN    : None
+ *
+ * NOTE      : the caller passes ownership of super_frame, so it is our
+ *             responsibility to free super_frame once we are done with it.
+ *             The new preview frame is sent to the display, while an older
+ *             frame is dequeued from the display and must be returned to
+ *             the kernel for future use.
+ *==========================================================================*/
+void QCamera2HardwareInterface::preview_stream_cb_routine(mm_camera_super_buf_t *super_frame,
+                                                          QCameraStream * stream,
+                                                          void *userdata)
+{
+    ATRACE_CALL();
+    CDBG_HIGH("[KPI Perf] %s : BEGIN", __func__);
+    int err = NO_ERROR;
+    QCamera2HardwareInterface *pme = (QCamera2HardwareInterface *)userdata;
+    QCameraGrallocMemory *memory = (QCameraGrallocMemory *)super_frame->bufs[0]->mem_info;
+
+    if (pme == NULL) {
+        ALOGE("%s: Invalid hardware object", __func__);
+        free(super_frame);
+        return;
+    }
+    if (memory == NULL) {
+        ALOGE("%s: Invalid memory object", __func__);
+        free(super_frame);
+        return;
+    }
+
+    mm_camera_buf_def_t *frame = super_frame->bufs[0];
+    if (NULL == frame) {
+        ALOGE("%s: preview frame is NLUL", __func__);
+        free(super_frame);
+        return;
+    }
+
+    if (!pme->needProcessPreviewFrame()) {
+        ALOGE("%s: preview is not running, no need to process", __func__);
+        stream->bufDone(frame->buf_idx);
+        free(super_frame);
+        return;
+    }
+
+    if (pme->needDebugFps()) {
+        pme->debugShowPreviewFPS();
+    }
+
+    uint32_t idx = frame->buf_idx;
+    pme->dumpFrameToFile(stream, frame, QCAMERA_DUMP_FRM_PREVIEW);
+
+    if(pme->m_bPreviewStarted) {
+       ALOGI("[KPI Perf] %s : PROFILE_FIRST_PREVIEW_FRAME", __func__);
+       pme->m_bPreviewStarted = false ;
+    }
+
+    // Display the buffer.
+    CDBG("%p displayBuffer %d E", pme, idx);
+    int dequeuedIdx = memory->displayBuffer(idx);
+    if (dequeuedIdx < 0 || dequeuedIdx >= memory->getCnt()) {
+        CDBG_HIGH("%s: Invalid dequeued buffer index %d from display",
+              __func__, dequeuedIdx);
+    } else {
+        // Return dequeued buffer back to driver
+        err = stream->bufDone((uint32_t)dequeuedIdx);
+        if ( err < 0) {
+            ALOGE("stream bufDone failed %d", err);
+        }
+    }
+
+    // Handle preview data callback
+    if (pme->mDataCb != NULL &&
+            (pme->msgTypeEnabledWithLock(CAMERA_MSG_PREVIEW_FRAME) > 0) &&
+            (!pme->mParameters.isSceneSelectionEnabled())) {
+        int32_t rc = pme->sendPreviewCallback(stream, memory, idx);
+        if (NO_ERROR != rc) {
+            ALOGE("%s: Preview callback was not sent succesfully", __func__);
+        }
+    }
+
+    free(super_frame);
+    CDBG_HIGH("[KPI Perf] %s : END", __func__);
+    return;
+}
+
+/*===========================================================================
+ * FUNCTION   : sendPreviewCallback
+ *
+ * DESCRIPTION: helper function for triggering preview callbacks
+ *
+ * PARAMETERS :
+ *   @stream    : stream object
+ *   @memory    : Gralloc memory allocator
+ *   @idx       : buffer index
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::sendPreviewCallback(QCameraStream *stream,
+        QCameraGrallocMemory *memory, uint32_t idx)
+{
+    camera_memory_t *previewMem = NULL;
+    camera_memory_t *data = NULL;
+    camera_memory_t *dataToApp = NULL;
+    size_t previewBufSize = 0;
+    size_t previewBufSizeFromCallback = 0;
+    cam_dimension_t preview_dim;
+    cam_format_t previewFmt;
+    int32_t rc = NO_ERROR;
+    int32_t yStride = 0;
+    int32_t yScanline = 0;
+    int32_t uvStride = 0;
+    int32_t uvScanline = 0;
+    int32_t uStride = 0;
+    int32_t uScanline = 0;
+    int32_t vStride = 0;
+    int32_t vScanline = 0;
+    int32_t yStrideToApp = 0;
+    int32_t uvStrideToApp = 0;
+    int32_t yScanlineToApp = 0;
+    int32_t uvScanlineToApp = 0;
+    int32_t srcOffset = 0;
+    int32_t dstOffset = 0;
+    int32_t srcBaseOffset = 0;
+    int32_t dstBaseOffset = 0;
+    int i;
+
+    if ((NULL == stream) || (NULL == memory)) {
+        ALOGE("%s: Invalid preview callback input", __func__);
+        return BAD_VALUE;
+    }
+
+    cam_stream_info_t *streamInfo =
+            reinterpret_cast<cam_stream_info_t *>(stream->getStreamInfoBuf()->getPtr(0));
+    if (NULL == streamInfo) {
+        ALOGE("%s: Invalid streamInfo", __func__);
+        return BAD_VALUE;
+    }
+
+    stream->getFrameDimension(preview_dim);
+    stream->getFormat(previewFmt);
+
+    /* The preview buffer size in the callback should be
+     * (width * height * bytes_per_pixel). Since all preview formats we support
+     * use 12 bits per pixel, buffer size = previewWidth * previewHeight * 3/2.
+     * We will need to add a check here if other formats are supported in the future. */
+    if ((previewFmt == CAM_FORMAT_YUV_420_NV21) ||
+        (previewFmt == CAM_FORMAT_YUV_420_NV12) ||
+        (previewFmt == CAM_FORMAT_YUV_420_YV12)) {
+        if(previewFmt == CAM_FORMAT_YUV_420_YV12) {
+            yStride = streamInfo->buf_planes.plane_info.mp[0].stride;
+            yScanline = streamInfo->buf_planes.plane_info.mp[0].scanline;
+            uStride = streamInfo->buf_planes.plane_info.mp[1].stride;
+            uScanline = streamInfo->buf_planes.plane_info.mp[1].scanline;
+            vStride = streamInfo->buf_planes.plane_info.mp[2].stride;
+            vScanline = streamInfo->buf_planes.plane_info.mp[2].scanline;
+
+            previewBufSize = (size_t)
+                    (yStride * yScanline + uStride * uScanline + vStride * vScanline);
+            previewBufSizeFromCallback = previewBufSize;
+        } else {
+            yStride = streamInfo->buf_planes.plane_info.mp[0].stride;
+            yScanline = streamInfo->buf_planes.plane_info.mp[0].scanline;
+            uvStride = streamInfo->buf_planes.plane_info.mp[1].stride;
+            uvScanline = streamInfo->buf_planes.plane_info.mp[1].scanline;
+
+            yStrideToApp = preview_dim.width;
+            yScanlineToApp = preview_dim.height;
+            uvStrideToApp = yStrideToApp;
+            uvScanlineToApp = yScanlineToApp / 2;
+
+            previewBufSize = (size_t)
+                    ((yStrideToApp * yScanlineToApp) + (uvStrideToApp * uvScanlineToApp));
+
+            previewBufSizeFromCallback = (size_t)
+                    ((yStride * yScanline) + (uvStride * uvScanline));
+        }
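+        // If the stream buffer is already tightly packed, wrap it and hand it to the app
+        // directly; otherwise copy plane by plane to strip the stride/scanline padding.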
+        if(previewBufSize == previewBufSizeFromCallback) {
+            previewMem = mGetMemory(memory->getFd(idx),
+                       previewBufSize, 1, mCallbackCookie);
+            if (!previewMem || !previewMem->data) {
+                ALOGE("%s: mGetMemory failed.\n", __func__);
+                return NO_MEMORY;
+            } else {
+                data = previewMem;
+            }
+        } else {
+            data = memory->getMemory(idx, false);
+            dataToApp = mGetMemory(-1, previewBufSize, 1, mCallbackCookie);
+            if (!dataToApp || !dataToApp->data) {
+                ALOGE("%s: mGetMemory failed.\n", __func__);
+                return NO_MEMORY;
+            }
+
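+            // Copy the Y plane row by row, dropping the per-row padding
+            // (yStride - yStrideToApp bytes).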
+            for (i = 0; i < preview_dim.height; i++) {
+                srcOffset = i * yStride;
+                dstOffset = i * yStrideToApp;
+
+                memcpy((unsigned char *) dataToApp->data + dstOffset,
+                        (unsigned char *) data->data + srcOffset,
+                        (size_t)yStrideToApp);
+            }
+
+            srcBaseOffset = yStride * yScanline;
+            dstBaseOffset = yStrideToApp * yScanlineToApp;
+
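+            /* Copy the interleaved UV plane, offset past the padded Y plane in
+             * the source and the packed Y plane in the destination. */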
+            for (i = 0; i < preview_dim.height/2; i++) {
+                srcOffset = i * uvStride + srcBaseOffset;
+                dstOffset = i * uvStrideToApp + dstBaseOffset;
+
+                memcpy((unsigned char *) dataToApp->data + dstOffset,
+                        (unsigned char *) data->data + srcOffset,
+                        (size_t)yStrideToApp);
+            }
+        }
+    } else {
+        data = memory->getMemory(idx, false);
+        ALOGE("%s: Invalid preview format, buffer size in preview callback may be wrong.",
+                __func__);
+    }
+    qcamera_callback_argm_t cbArg;
+    memset(&cbArg, 0, sizeof(qcamera_callback_argm_t));
+    cbArg.cb_type = QCAMERA_DATA_CALLBACK;
+    cbArg.msg_type = CAMERA_MSG_PREVIEW_FRAME;
+    if (previewBufSize != 0 && previewBufSizeFromCallback != 0 &&
+            previewBufSize == previewBufSizeFromCallback) {
+        cbArg.data = data;
+    } else {
+        cbArg.data = dataToApp;
+    }
+    if ( previewMem ) {
+        cbArg.user_data = previewMem;
+        cbArg.release_cb = releaseCameraMemory;
+    } else if (dataToApp) {
+        cbArg.user_data = dataToApp;
+        cbArg.release_cb = releaseCameraMemory;
+    }
+    cbArg.cookie = this;
+    rc = m_cbNotifier.notifyCallback(cbArg);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: fail sending notification", __func__);
+        if (previewMem) {
+            previewMem->release(previewMem);
+        } else if (dataToApp) {
+            dataToApp->release(dataToApp);
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : nodisplay_preview_stream_cb_routine
+ *
+ * DESCRIPTION: helper function to handle preview frame from preview stream in
+ *              no-display case
+ *
+ * PARAMETERS :
+ *   @super_frame : received super buffer
+ *   @stream      : stream object
+ *   @userdata    : user data ptr
+ *
+ * RETURN    : None
+ *
+ * NOTE      : caller passes the ownership of super_frame, it's our
+ *             responsibility to free super_frame once it's done.
+ *==========================================================================*/
+void QCamera2HardwareInterface::nodisplay_preview_stream_cb_routine(
+                                                          mm_camera_super_buf_t *super_frame,
+                                                          QCameraStream *stream,
+                                                          void * userdata)
+{
+    ATRACE_CALL();
+    CDBG_HIGH("[KPI Perf] %s E",__func__);
+    QCamera2HardwareInterface *pme = (QCamera2HardwareInterface *)userdata;
+    if (pme == NULL ||
+        pme->mCameraHandle == NULL ||
+        pme->mCameraHandle->camera_handle != super_frame->camera_handle){
+        ALOGE("%s: camera obj not valid", __func__);
+        // simply free super frame
+        free(super_frame);
+        return;
+    }
+    mm_camera_buf_def_t *frame = super_frame->bufs[0];
+    if (NULL == frame) {
+        ALOGE("%s: preview frame is NULL", __func__);
+        free(super_frame);
+        return;
+    }
+
+    if (!pme->needProcessPreviewFrame()) {
+        CDBG_HIGH("%s: preview is not running, no need to process", __func__);
+        stream->bufDone(frame->buf_idx);
+        free(super_frame);
+        return;
+    }
+
+    if (pme->needDebugFps()) {
+        pme->debugShowPreviewFPS();
+    }
+
+    QCameraMemory *previewMemObj = (QCameraMemory *)frame->mem_info;
+    camera_memory_t *preview_mem = NULL;
+    if (previewMemObj != NULL) {
+        preview_mem = previewMemObj->getMemory(frame->buf_idx, false);
+    }
+    if (NULL != previewMemObj && NULL != preview_mem) {
+        pme->dumpFrameToFile(stream, frame, QCAMERA_DUMP_FRM_PREVIEW);
+
+        if (pme->needProcessPreviewFrame() &&
+            pme->mDataCb != NULL &&
+            pme->msgTypeEnabledWithLock(CAMERA_MSG_PREVIEW_FRAME) > 0 ) {
+            qcamera_callback_argm_t cbArg;
+            memset(&cbArg, 0, sizeof(qcamera_callback_argm_t));
+            cbArg.cb_type = QCAMERA_DATA_CALLBACK;
+            cbArg.msg_type = CAMERA_MSG_PREVIEW_FRAME;
+            cbArg.data = preview_mem;
+            cbArg.user_data = (void *) &frame->buf_idx;
+            cbArg.cookie = stream;
+            cbArg.release_cb = returnStreamBuffer;
+            int32_t rc = pme->m_cbNotifier.notifyCallback(cbArg);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: fail sending data notify", __func__);
+                stream->bufDone(frame->buf_idx);
+            }
+        } else {
+            stream->bufDone(frame->buf_idx);
+        }
+    }
+    free(super_frame);
+    CDBG_HIGH("[KPI Perf] %s X",__func__);
+}
+
+/*===========================================================================
+ * FUNCTION   : rdi_mode_stream_cb_routine
+ *
+ * DESCRIPTION: helper function to handle RDI frame from preview stream in
+ *              rdi mode case
+ *
+ * PARAMETERS :
+ *   @super_frame : received super buffer
+ *   @stream      : stream object
+ *   @userdata    : user data ptr
+ *
+ * RETURN    : None
+ *
+ * NOTE      : caller passes the ownership of super_frame, it's our
+ *             responsibility to free super_frame once it's done.
+ *==========================================================================*/
+void QCamera2HardwareInterface::rdi_mode_stream_cb_routine(
+  mm_camera_super_buf_t *super_frame,
+  QCameraStream *stream,
+  void * userdata)
+{
+    ATRACE_CALL();
+    CDBG_HIGH("RDI_DEBUG %s[%d]: Enter", __func__, __LINE__);
+    QCamera2HardwareInterface *pme = (QCamera2HardwareInterface *)userdata;
+    if (pme == NULL ||
+        pme->mCameraHandle == NULL ||
+        pme->mCameraHandle->camera_handle != super_frame->camera_handle){
+        ALOGE("%s: camera obj not valid", __func__);
+        free(super_frame);
+        return;
+    }
+    mm_camera_buf_def_t *frame = super_frame->bufs[0];
+    if (NULL == frame) {
+        ALOGE("%s: preview frame is NLUL", __func__);
+        goto end;
+    }
+    if (!pme->needProcessPreviewFrame()) {
+        ALOGE("%s: preview is not running, no need to process", __func__);
+        stream->bufDone(frame->buf_idx);
+        goto end;
+    }
+    if (pme->needDebugFps()) {
+        pme->debugShowPreviewFPS();
+    }
+    // Non-secure Mode
+    if (!pme->isSecureMode()) {
+        QCameraMemory *previewMemObj = (QCameraMemory *)frame->mem_info;
+        if (NULL == previewMemObj) {
+            ALOGE("%s: previewMemObj is NULL", __func__);
+            stream->bufDone(frame->buf_idx);
+            goto end;
+        }
+
+        camera_memory_t *preview_mem = previewMemObj->getMemory(frame->buf_idx, false);
+        if (NULL != preview_mem) {
+            previewMemObj->cleanCache(frame->buf_idx);
+            // Dump RAW frame
+            pme->dumpFrameToFile(stream, frame, QCAMERA_DUMP_FRM_RAW);
+            // Notify Preview callback frame
+            if (pme->needProcessPreviewFrame() &&
+                    pme->mDataCb != NULL &&
+                    pme->msgTypeEnabledWithLock(CAMERA_MSG_PREVIEW_FRAME) > 0) {
+                qcamera_callback_argm_t cbArg;
+                memset(&cbArg, 0, sizeof(qcamera_callback_argm_t));
+                cbArg.cb_type    = QCAMERA_DATA_CALLBACK;
+                cbArg.msg_type   = CAMERA_MSG_PREVIEW_FRAME;
+                cbArg.data       = preview_mem;
+                cbArg.user_data = (void *) &frame->buf_idx;
+                cbArg.cookie     = stream;
+                cbArg.release_cb = returnStreamBuffer;
+                pme->m_cbNotifier.notifyCallback(cbArg);
+            } else {
+                CDBG_HIGH("%s: No need to process preview frame, return buffer", __func__);
+                stream->bufDone(frame->buf_idx);
+            }
+        } else {
+            ALOGE("%s: preview_mem is NULL", __func__);
+            stream->bufDone(frame->buf_idx);
+        }
+    } else {
+        // Secure Mode
+        // We will do QCAMERA_NOTIFY_CALLBACK and share FD in case of secure mode
+        QCameraMemory *previewMemObj = (QCameraMemory *)frame->mem_info;
+        if (NULL == previewMemObj) {
+            ALOGE("%s: previewMemObj is NULL", __func__);
+            stream->bufDone(frame->buf_idx);
+            goto end;
+        }
+
+        int fd = previewMemObj->getFd(frame->buf_idx);
+        ALOGD("%s: Preview frame fd =%d for index = %d ", __func__, fd, frame->buf_idx);
+        if (pme->needProcessPreviewFrame() &&
+                pme->mDataCb != NULL &&
+                pme->msgTypeEnabledWithLock(CAMERA_MSG_PREVIEW_FRAME) > 0) {
+            // Prepare Callback structure
+            qcamera_callback_argm_t cbArg;
+            memset(&cbArg, 0, sizeof(qcamera_callback_argm_t));
+            cbArg.cb_type    = QCAMERA_NOTIFY_CALLBACK;
+            cbArg.msg_type   = CAMERA_MSG_PREVIEW_FRAME;
+#ifndef VANILLA_HAL
+            cbArg.ext1       = CAMERA_FRAME_DATA_FD;
+            cbArg.ext2       = fd;
+#endif
+            cbArg.user_data  = (void *) &frame->buf_idx;
+            cbArg.cookie     = stream;
+            cbArg.release_cb = returnStreamBuffer;
+            pme->m_cbNotifier.notifyCallback(cbArg);
+        } else {
+            CDBG_HIGH("%s: No need to process preview frame, return buffer", __func__);
+            stream->bufDone(frame->buf_idx);
+        }
+    }
+end:
+    free(super_frame);
+    CDBG_HIGH("RDI_DEBUG %s[%d]: Exit", __func__, __LINE__);
+    return;
+}
+
+/*===========================================================================
+ * FUNCTION   : postview_stream_cb_routine
+ *
+ * DESCRIPTION: helper function to handle post frame from postview stream
+ *
+ * PARAMETERS :
+ *   @super_frame : received super buffer
+ *   @stream      : stream object
+ *   @userdata    : user data ptr
+ *
+ * RETURN    : None
+ *
+ * NOTE      : caller passes the ownership of super_frame, it's our
+ *             responsibility to free super_frame once it's done.
+ *==========================================================================*/
+void QCamera2HardwareInterface::postview_stream_cb_routine(mm_camera_super_buf_t *super_frame,
+                                                           QCameraStream *stream,
+                                                           void *userdata)
+{
+    ATRACE_CALL();
+    int err = NO_ERROR;
+    QCamera2HardwareInterface *pme = (QCamera2HardwareInterface *)userdata;
+
+    if (pme == NULL) {
+        ALOGE("%s: Invalid hardware object", __func__);
+        free(super_frame);
+        return;
+    }
+
+    CDBG_HIGH("[KPI Perf] %s : BEGIN", __func__);
+
+    mm_camera_buf_def_t *frame = super_frame->bufs[0];
+    if (NULL == frame) {
+        ALOGE("%s: preview frame is NULL", __func__);
+        free(super_frame);
+        return;
+    }
+
+    QCameraMemory *memObj = (QCameraMemory *)frame->mem_info;
+    if (NULL == memObj) {
+        ALOGE("%s: Invalid memory object", __func__);
+        free(super_frame);
+        return;
+    }
+    pme->dumpFrameToFile(stream, frame, QCAMERA_DUMP_FRM_THUMBNAIL);
+
+    // Return buffer back to driver
+    err = stream->bufDone(frame->buf_idx);
+    if ( err < 0) {
+        ALOGE("stream bufDone failed %d", err);
+    }
+
+    free(super_frame);
+    CDBG_HIGH("[KPI Perf] %s : END", __func__);
+    return;
+}
+
+/*===========================================================================
+ * FUNCTION   : video_stream_cb_routine
+ *
+ * DESCRIPTION: helper function to handle video frame from video stream
+ *
+ * PARAMETERS :
+ *   @super_frame : received super buffer
+ *   @stream      : stream object
+ *   @userdata    : user data ptr
+ *
+ * RETURN    : None
+ *
+ * NOTE      : caller passes the ownership of super_frame, it's our
+ *             responsibility to free super_frame once it's done. video
+ *             frame will be sent to video encoder. Once video encoder is
+ *             done with the video frame, it will call another API
+ *             (release_recording_frame) to return the frame back
+ *==========================================================================*/
+void QCamera2HardwareInterface::video_stream_cb_routine(mm_camera_super_buf_t *super_frame,
+                                                        QCameraStream *stream,
+                                                        void *userdata)
+{
+    ATRACE_CALL();
+    CDBG_HIGH("[KPI Perf] %s : BEGIN", __func__);
+    QCamera2HardwareInterface *pme = (QCamera2HardwareInterface *)userdata;
+    if (pme == NULL ||
+        pme->mCameraHandle == NULL ||
+        pme->mCameraHandle->camera_handle != super_frame->camera_handle){
+        ALOGE("%s: camera obj not valid", __func__);
+        // simply free super frame
+        free(super_frame);
+        return;
+    }
+
+    mm_camera_buf_def_t *frame = super_frame->bufs[0];
+
+    if (pme->needDebugFps()) {
+        pme->debugShowVideoFPS();
+    }
+    if(pme->m_bRecordStarted) {
+       ALOGI("[KPI Perf] %s : PROFILE_FIRST_RECORD_FRAME", __func__);
+       pme->m_bRecordStarted = false;
+    }
+    CDBG_HIGH("%s: Stream(%d), Timestamp: %ld %ld",
+          __func__,
+          frame->stream_id,
+          frame->ts.tv_sec,
+          frame->ts.tv_nsec);
+
+    if (frame->buf_type == CAM_STREAM_BUF_TYPE_MPLANE) {
+        nsecs_t timeStamp;
+        timeStamp = nsecs_t(frame->ts.tv_sec) * 1000000000LL + frame->ts.tv_nsec;
+        CDBG_HIGH("Send Video frame to services/encoder TimeStamp : %lld",
+            timeStamp);
+        QCameraMemory *videoMemObj = (QCameraMemory *)frame->mem_info;
+        camera_memory_t *video_mem = NULL;
+        if (NULL != videoMemObj) {
+            video_mem = videoMemObj->getMemory(frame->buf_idx,
+                    (pme->mStoreMetaDataInFrame > 0)? true : false);
+        }
+        if (NULL != videoMemObj && NULL != video_mem) {
+            pme->dumpFrameToFile(stream, frame, QCAMERA_DUMP_FRM_VIDEO);
+            if ((pme->mDataCbTimestamp != NULL) &&
+                pme->msgTypeEnabledWithLock(CAMERA_MSG_VIDEO_FRAME) > 0) {
+                qcamera_callback_argm_t cbArg;
+                memset(&cbArg, 0, sizeof(qcamera_callback_argm_t));
+                cbArg.cb_type = QCAMERA_DATA_TIMESTAMP_CALLBACK;
+                cbArg.msg_type = CAMERA_MSG_VIDEO_FRAME;
+                cbArg.data = video_mem;
+                cbArg.timestamp = timeStamp;
+                int32_t rc = pme->m_cbNotifier.notifyCallback(cbArg);
+                if (rc != NO_ERROR) {
+                    ALOGE("%s: fail sending data notify", __func__);
+                    stream->bufDone(frame->buf_idx);
+                }
+            }
+        }
+    } else {
+        QCameraMemory *videoMemObj = (QCameraMemory *)frame->mem_info;
+        camera_memory_t *video_mem = NULL;
+        native_handle_t *nh = NULL;
+        int fd_cnt = frame->user_buf.bufs_used;
+        if (NULL != videoMemObj) {
+            video_mem = videoMemObj->getMemory(frame->buf_idx, true);
+            if (video_mem != NULL) {
+                struct encoder_media_buffer_type * packet =
+                        (struct encoder_media_buffer_type *)video_mem->data;
+                // meta handle layout: fd_cnt buffer FDs followed by
+                // 3 * fd_cnt ints (offset, size and timestamp delta per buffer)
+                packet->meta_handle = native_handle_create(fd_cnt, (3 * fd_cnt));
+                packet->buffer_type = kMetadataBufferTypeCameraSource;
+                nh = const_cast<native_handle_t *>(packet->meta_handle);
+            } else {
+                ALOGE("%s video_mem NULL", __func__);
+            }
+        } else {
+            ALOGE("%s videoMemObj NULL", __func__);
+        }
+
+        if (nh != NULL) {
+            nsecs_t timeStamp;
+            timeStamp = nsecs_t(frame->ts.tv_sec) * 1000000000LL
+                    + frame->ts.tv_nsec;
+            CDBG("Batch buffer TimeStamp : %lld FD = %d index = %d fd_cnt = %d",
+                    timeStamp, frame->fd, frame->buf_idx, fd_cnt);
+
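+            /* Fill one entry per used plane buffer: FD, zero offset, buffer
+             * size and the timestamp delta relative to the batch timestamp,
+             * and dump each plane frame if frame dumping is enabled. */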
+            for (int i = 0; i < fd_cnt; i++) {
+                if (frame->user_buf.buf_idx[i] >= 0) {
+                    mm_camera_buf_def_t *plane_frame =
+                            &frame->user_buf.plane_buf[frame->user_buf.buf_idx[i]];
+                    QCameraMemory *frameobj = (QCameraMemory *)plane_frame->mem_info;
+                    nsecs_t frame_ts = nsecs_t(plane_frame->ts.tv_sec) * 1000000000LL
+                            + plane_frame->ts.tv_nsec;
+                    /*data[0] => FD data[1] => OFFSET data[2] => SIZE data[3] => TIMESTAMP*/
+                    nh->data[i] = frameobj->getFd(plane_frame->buf_idx);
+                    nh->data[fd_cnt + i] = 0;
+                    nh->data[(2 * fd_cnt) + i] = (int)frameobj->getSize(plane_frame->buf_idx);
+                    nh->data[(3 * fd_cnt) + i] = (int)(frame_ts - timeStamp);
+                    CDBG("Send Video frames to services/encoder delta : %lld FD = %d index = %d",
+                            (frame_ts - timeStamp), plane_frame->fd, plane_frame->buf_idx);
+                    pme->dumpFrameToFile(stream, plane_frame, QCAMERA_DUMP_FRM_VIDEO);
+                }
+            }
+
+            if ((pme->mDataCbTimestamp != NULL) &&
+                        pme->msgTypeEnabledWithLock(CAMERA_MSG_VIDEO_FRAME) > 0) {
+                qcamera_callback_argm_t cbArg;
+                memset(&cbArg, 0, sizeof(qcamera_callback_argm_t));
+                cbArg.cb_type = QCAMERA_DATA_TIMESTAMP_CALLBACK;
+                cbArg.msg_type = CAMERA_MSG_VIDEO_FRAME;
+                cbArg.data = video_mem;
+                cbArg.timestamp = timeStamp;
+                int32_t rc = pme->m_cbNotifier.notifyCallback(cbArg);
+                if (rc != NO_ERROR) {
+                    ALOGE("%s: fail sending data notify", __func__);
+                    stream->bufDone(frame->buf_idx);
+                }
+            }
+        } else {
+            ALOGE("%s: No Video Meta Available. Return Buffer", __func__);
+            stream->bufDone(super_frame->bufs[0]->buf_idx);
+        }
+    }
+    free(super_frame);
+    CDBG_HIGH("[KPI Perf] %s : END", __func__);
+}
+
+/*===========================================================================
+ * FUNCTION   : snapshot_channel_cb_routine
+ *
+ * DESCRIPTION: helper function to handle snapshot frame from snapshot channel
+ *
+ * PARAMETERS :
+ *   @super_frame : received super buffer
+ *   @userdata    : user data ptr
+ *
+ * RETURN    : None
+ *
+ * NOTE      : recvd_frame will be released after this call by caller, so if
+ *             async operation needed for recvd_frame, it's our responsibility
+ *             to save a copy for this variable to be used later.
+ *==========================================================================*/
+void QCamera2HardwareInterface::snapshot_channel_cb_routine(mm_camera_super_buf_t *super_frame,
+       void *userdata)
+{
+    ATRACE_CALL();
+    char value[PROPERTY_VALUE_MAX];
+
+    CDBG_HIGH("[KPI Perf] %s: E", __func__);
+    QCamera2HardwareInterface *pme = (QCamera2HardwareInterface *)userdata;
+    if (pme == NULL ||
+        pme->mCameraHandle == NULL ||
+        pme->mCameraHandle->camera_handle != super_frame->camera_handle){
+        ALOGE("%s: camera obj not valid", __func__);
+        // simply free super frame
+        free(super_frame);
+        return;
+    }
+
+    QCameraChannel *pChannel = pme->m_channels[QCAMERA_CH_TYPE_SNAPSHOT];
+    if ((pChannel == NULL) || (pChannel->getMyHandle() != super_frame->ch_id)) {
+        ALOGE("%s: Snapshot channel doesn't exist, return here", __func__);
+        return;
+    }
+
+    property_get("persist.camera.dumpmetadata", value, "0");
+    int32_t enabled = atoi(value);
+    if (enabled) {
+        if (pChannel == NULL ||
+            pChannel->getMyHandle() != super_frame->ch_id) {
+            ALOGE("%s: Capture channel doesn't exist, return here", __func__);
+            return;
+        }
+        mm_camera_buf_def_t *pMetaFrame = NULL;
+        QCameraStream *pStream = NULL;
+        for (uint32_t i = 0; i < super_frame->num_bufs; i++) {
+            pStream = pChannel->getStreamByHandle(super_frame->bufs[i]->stream_id);
+            if (pStream != NULL) {
+                if (pStream->isTypeOf(CAM_STREAM_TYPE_METADATA)) {
+                    pMetaFrame = super_frame->bufs[i]; //find the metadata
+                    if (pMetaFrame != NULL &&
+                            ((metadata_buffer_t *)pMetaFrame->buffer)->is_tuning_params_valid) {
+                        pme->dumpMetadataToFile(pStream, pMetaFrame, (char *) "Snapshot");
+                    }
+                    break;
+                }
+            }
+        }
+    }
+
+    // save a copy for the superbuf
+    mm_camera_super_buf_t* frame = (mm_camera_super_buf_t *)malloc(sizeof(mm_camera_super_buf_t));
+    if (frame == NULL) {
+        ALOGE("%s: Error allocating memory to save received_frame structure.",
+                __func__);
+        pChannel->bufDone(super_frame);
+        return;
+    }
+    *frame = *super_frame;
+
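+    // Hand the copy to the postprocessor; the original super_frame is
+    // released by the caller (see NOTE above).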
+    pme->m_postprocessor.processData(frame);
+
+    CDBG_HIGH("[KPI Perf] %s: X", __func__);
+}
+
+/*===========================================================================
+ * FUNCTION   : raw_stream_cb_routine
+ *
+ * DESCRIPTION: helper function to handle raw dump frame from raw stream
+ *
+ * PARAMETERS :
+ *   @super_frame : received super buffer
+ *   @stream      : stream object
+ *   @userdata    : user data ptr
+ *
+ * RETURN    : None
+ *
+ * NOTE      : caller passes the ownership of super_frame, it's our
+ *             responsibility to free super_frame once it's done. For raw
+ *             frame, there is no need to send to postprocessor for jpeg
+ *             encoding. this function will play shutter and send the data
+ *             callback to upper layer. Raw frame buffer will be returned
+ *             back to kernel, and frame will be free after use.
+ *==========================================================================*/
+void QCamera2HardwareInterface::raw_stream_cb_routine(mm_camera_super_buf_t * super_frame,
+                                                      QCameraStream * /*stream*/,
+                                                      void * userdata)
+{
+    ATRACE_CALL();
+    CDBG_HIGH("[KPI Perf] %s : BEGIN", __func__);
+    QCamera2HardwareInterface *pme = (QCamera2HardwareInterface *)userdata;
+    if (pme == NULL ||
+        pme->mCameraHandle == NULL ||
+        pme->mCameraHandle->camera_handle != super_frame->camera_handle){
+        ALOGE("%s: camera obj not valid", __func__);
+        // simply free super frame
+        free(super_frame);
+        return;
+    }
+
+    pme->m_postprocessor.processRawData(super_frame);
+    CDBG_HIGH("[KPI Perf] %s : END", __func__);
+}
+
+/*===========================================================================
+ * FUNCTION   : preview_raw_stream_cb_routine
+ *
+ * DESCRIPTION: helper function to handle raw frame during standard preview
+ *
+ * PARAMETERS :
+ *   @super_frame : received super buffer
+ *   @stream      : stream object
+ *   @userdata    : user data ptr
+ *
+ * RETURN    : None
+ *
+ * NOTE      : caller passes the ownership of super_frame, it's our
+ *             responsibility to free super_frame once it's done.
+ *==========================================================================*/
+void QCamera2HardwareInterface::preview_raw_stream_cb_routine(mm_camera_super_buf_t * super_frame,
+                                                              QCameraStream * stream,
+                                                              void * userdata)
+{
+    ATRACE_CALL();
+    CDBG_HIGH("[KPI Perf] %s : BEGIN", __func__);
+    char value[PROPERTY_VALUE_MAX];
+    bool dump_raw = false;
+
+    QCamera2HardwareInterface *pme = (QCamera2HardwareInterface *)userdata;
+    if (pme == NULL ||
+        pme->mCameraHandle == NULL ||
+        pme->mCameraHandle->camera_handle != super_frame->camera_handle){
+        ALOGE("%s: camera obj not valid", __func__);
+        // simply free super frame
+        free(super_frame);
+        return;
+    }
+
+    property_get("persist.camera.preview_raw", value, "0");
+    dump_raw = atoi(value) > 0 ? true : false;
+
+    for (uint32_t i = 0; i < super_frame->num_bufs; i++) {
+        if (super_frame->bufs[i]->stream_type == CAM_STREAM_TYPE_RAW) {
+            mm_camera_buf_def_t * raw_frame = super_frame->bufs[i];
+            if (NULL != stream) {
+                if (dump_raw) {
+                    pme->dumpFrameToFile(stream, raw_frame, QCAMERA_DUMP_FRM_RAW);
+                }
+                stream->bufDone(super_frame->bufs[i]->buf_idx);
+            }
+            break;
+        }
+    }
+
+    free(super_frame);
+
+    CDBG_HIGH("[KPI Perf] %s : END", __func__);
+}
+
+/*===========================================================================
+ * FUNCTION   : snapshot_raw_stream_cb_routine
+ *
+ * DESCRIPTION: helper function to handle raw frame during standard capture
+ *
+ * PARAMETERS :
+ *   @super_frame : received super buffer
+ *   @stream      : stream object
+ *   @userdata    : user data ptr
+ *
+ * RETURN    : None
+ *
+ * NOTE      : caller passes the ownership of super_frame, it's our
+ *             responsibility to free super_frame once it's done.
+ *==========================================================================*/
+void QCamera2HardwareInterface::snapshot_raw_stream_cb_routine(mm_camera_super_buf_t * super_frame,
+                                                               QCameraStream * stream,
+                                                               void * userdata)
+{
+    ATRACE_CALL();
+    CDBG_HIGH("[KPI Perf] %s : BEGIN", __func__);
+    char value[PROPERTY_VALUE_MAX];
+    bool dump_raw = false;
+
+    QCamera2HardwareInterface *pme = (QCamera2HardwareInterface *)userdata;
+    if (pme == NULL ||
+        pme->mCameraHandle == NULL ||
+        pme->mCameraHandle->camera_handle != super_frame->camera_handle){
+        ALOGE("%s: camera obj not valid", __func__);
+        // simply free super frame
+        free(super_frame);
+        return;
+    }
+
+    property_get("persist.camera.snapshot_raw", value, "0");
+    dump_raw = atoi(value) > 0 ? true : false;
+
+    for (uint32_t i = 0; i < super_frame->num_bufs; i++) {
+        if (super_frame->bufs[i]->stream_type == CAM_STREAM_TYPE_RAW) {
+            mm_camera_buf_def_t * raw_frame = super_frame->bufs[i];
+            if (NULL != stream) {
+                if (dump_raw) {
+                    pme->dumpFrameToFile(stream, raw_frame, QCAMERA_DUMP_FRM_RAW);
+                }
+                stream->bufDone(super_frame->bufs[i]->buf_idx);
+            }
+            break;
+        }
+    }
+
+    free(super_frame);
+
+    CDBG_HIGH("[KPI Perf] %s : END", __func__);
+}
+
+/*===========================================================================
+ * FUNCTION   : updateMetadata
+ *
+ * DESCRIPTION: Frame related parameter can be updated here
+ *
+ * PARAMETERS :
+ *   @pMetaData : pointer to metadata buffer
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCamera2HardwareInterface::updateMetadata(metadata_buffer_t *pMetaData)
+{
+    int32_t rc = NO_ERROR;
+
+    if (pMetaData == NULL) {
+        ALOGE("%s: Null Metadata buffer", __func__);
+        return rc;
+    }
+
+    // Sharpness
+    cam_edge_application_t edge_application;
+    memset(&edge_application, 0x00, sizeof(cam_edge_application_t));
+    edge_application.sharpness = mParameters.getSharpness();
+    if (edge_application.sharpness != 0) {
+        edge_application.edge_mode = CAM_EDGE_MODE_FAST;
+    } else {
+        edge_application.edge_mode = CAM_EDGE_MODE_OFF;
+    }
+    ADD_SET_PARAM_ENTRY_TO_BATCH(pMetaData,
+            CAM_INTF_META_EDGE_MODE, edge_application);
+
+    //Effect
+    int32_t prmEffect = mParameters.getEffect();
+    ADD_SET_PARAM_ENTRY_TO_BATCH(pMetaData, CAM_INTF_PARM_EFFECT, prmEffect);
+
+    //flip
+    int32_t prmFlip = mParameters.getFlipMode(CAM_STREAM_TYPE_SNAPSHOT);
+    ADD_SET_PARAM_ENTRY_TO_BATCH(pMetaData, CAM_INTF_PARM_FLIP, prmFlip);
+
+    //denoise
+    uint8_t prmDenoise = (uint8_t)mParameters.isWNREnabled();
+    ADD_SET_PARAM_ENTRY_TO_BATCH(pMetaData,
+            CAM_INTF_META_NOISE_REDUCTION_MODE, prmDenoise);
+
+    //rotation & device rotation
+    uint32_t prmRotation = mParameters.getJpegRotation();
+    cam_rotation_info_t rotation_info;
+    if (prmRotation == 0) {
+       rotation_info.rotation = ROTATE_0;
+    } else if (prmRotation == 90) {
+       rotation_info.rotation = ROTATE_90;
+    } else if (prmRotation == 180) {
+       rotation_info.rotation = ROTATE_180;
+    } else if (prmRotation == 270) {
+       rotation_info.rotation = ROTATE_270;
+    } else {
+       rotation_info.rotation = ROTATE_0;
+    }
+
+    uint32_t device_rotation = mParameters.getDeviceRotation();
+    if (device_rotation == 0) {
+        rotation_info.device_rotation = ROTATE_0;
+    } else if (device_rotation == 90) {
+        rotation_info.device_rotation = ROTATE_90;
+    } else if (device_rotation == 180) {
+        rotation_info.device_rotation = ROTATE_180;
+    } else if (device_rotation == 270) {
+        rotation_info.device_rotation = ROTATE_270;
+    } else {
+        rotation_info.device_rotation = ROTATE_0;
+    }
+    ADD_SET_PARAM_ENTRY_TO_BATCH(pMetaData, CAM_INTF_PARM_ROTATION, rotation_info);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : metadata_stream_cb_routine
+ *
+ * DESCRIPTION: helper function to handle metadata frame from metadata stream
+ *
+ * PARAMETERS :
+ *   @super_frame : received super buffer
+ *   @stream      : stream object
+ *   @userdata    : user data ptr
+ *
+ * RETURN    : None
+ *
+ * NOTE      : caller passes the ownership of super_frame, it's our
+ *             responsibility to free super_frame once it's done. Metadata
+ *             could have valid entries for face detection result or
+ *             histogram statistics information.
+ *==========================================================================*/
+void QCamera2HardwareInterface::metadata_stream_cb_routine(mm_camera_super_buf_t * super_frame,
+                                                           QCameraStream * stream,
+                                                           void * userdata)
+{
+    ATRACE_CALL();
+    CDBG("[KPI Perf] %s : BEGIN", __func__);
+    QCamera2HardwareInterface *pme = (QCamera2HardwareInterface *)userdata;
+    if (pme == NULL ||
+        pme->mCameraHandle == NULL ||
+        pme->mCameraHandle->camera_handle != super_frame->camera_handle){
+        ALOGE("%s: camera obj not valid", __func__);
+        // simply free super frame
+        free(super_frame);
+        return;
+    }
+
+    mm_camera_buf_def_t *frame = super_frame->bufs[0];
+    metadata_buffer_t *pMetaData = (metadata_buffer_t *)frame->buffer;
+    if (pme->m_stateMachine.isNonZSLCaptureRunning() &&
+       !pme->mLongshotEnabled) {
+       // Make shutter callback in non-ZSL mode once the raw frame is received from the VFE.
+       pme->playShutter();
+    }
+
+    if (pMetaData->is_tuning_params_valid && pme->mParameters.getRecordingHintValue() == true) {
+        //Dump Tuning data for video
+        pme->dumpMetadataToFile(stream,frame,(char *)"Video");
+    }
+
+    IF_META_AVAILABLE(cam_hist_stats_t, stats_data, CAM_INTF_META_HISTOGRAM, pMetaData) {
+        // process histogram statistics info
+        qcamera_sm_internal_evt_payload_t *payload =
+            (qcamera_sm_internal_evt_payload_t *)
+                malloc(sizeof(qcamera_sm_internal_evt_payload_t));
+        if (NULL != payload) {
+            memset(payload, 0, sizeof(qcamera_sm_internal_evt_payload_t));
+            payload->evt_type = QCAMERA_INTERNAL_EVT_HISTOGRAM_STATS;
+            payload->stats_data = *stats_data;
+            int32_t rc = pme->processEvt(QCAMERA_SM_EVT_EVT_INTERNAL, payload);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: processEvt histogram failed", __func__);
+                free(payload);
+                payload = NULL;
+
+            }
+        } else {
+            ALOGE("%s: No memory for histogram qcamera_sm_internal_evt_payload_t", __func__);
+        }
+    }
+
+    IF_META_AVAILABLE(cam_face_detection_data_t, faces_data,
+            CAM_INTF_META_FACE_DETECTION, pMetaData) {
+        if (faces_data->num_faces_detected > MAX_ROI) {
+            ALOGE("%s: Invalid number of faces %d",
+                __func__, faces_data->num_faces_detected);
+        } else {
+            // process face detection result
+            if (faces_data->num_faces_detected)
+                CDBG_HIGH("[KPI Perf] %s: PROFILE_NUMBER_OF_FACES_DETECTED %d",
+                    __func__,faces_data->num_faces_detected);
+            faces_data->fd_type = QCAMERA_FD_PREVIEW; //HARD CODE here before MCT can support
+            qcamera_sm_internal_evt_payload_t *payload = (qcamera_sm_internal_evt_payload_t *)
+                malloc(sizeof(qcamera_sm_internal_evt_payload_t));
+            if (NULL != payload) {
+                memset(payload, 0, sizeof(qcamera_sm_internal_evt_payload_t));
+                payload->evt_type = QCAMERA_INTERNAL_EVT_FACE_DETECT_RESULT;
+                payload->faces_data = *faces_data;
+                int32_t rc = pme->processEvt(QCAMERA_SM_EVT_EVT_INTERNAL, payload);
+                if (rc != NO_ERROR) {
+                    ALOGE("%s: processEvt face detection failed", __func__);
+                    free(payload);
+                    payload = NULL;
+                }
+            } else {
+                ALOGE("%s: No memory for face detect qcamera_sm_internal_evt_payload_t", __func__);
+            }
+        }
+    }
+
+    IF_META_AVAILABLE(cam_auto_focus_data_t, focus_data,
+            CAM_INTF_META_AUTOFOCUS_DATA, pMetaData) {
+        qcamera_sm_internal_evt_payload_t *payload =
+            (qcamera_sm_internal_evt_payload_t *)malloc(sizeof(qcamera_sm_internal_evt_payload_t));
+        if (NULL != payload) {
+            memset(payload, 0, sizeof(qcamera_sm_internal_evt_payload_t));
+            payload->evt_type = QCAMERA_INTERNAL_EVT_FOCUS_UPDATE;
+            payload->focus_data = *focus_data;
+            payload->focus_data.focused_frame_idx = frame->frame_idx;
+            int32_t rc = pme->processEvt(QCAMERA_SM_EVT_EVT_INTERNAL, payload);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: processEvt focus failed", __func__);
+                free(payload);
+                payload = NULL;
+
+            }
+        } else {
+            ALOGE("%s: No memory for focus qcamera_sm_internal_evt_payload_t", __func__);
+        }
+    }
+
+    IF_META_AVAILABLE(cam_crop_data_t, crop_data, CAM_INTF_META_CROP_DATA, pMetaData) {
+        if (crop_data->num_of_streams > MAX_NUM_STREAMS) {
+            ALOGE("%s: Invalid num_of_streams %d in crop_data", __func__,
+                crop_data->num_of_streams);
+        } else {
+            qcamera_sm_internal_evt_payload_t *payload =
+                (qcamera_sm_internal_evt_payload_t *)
+                    malloc(sizeof(qcamera_sm_internal_evt_payload_t));
+            if (NULL != payload) {
+                memset(payload, 0, sizeof(qcamera_sm_internal_evt_payload_t));
+                payload->evt_type = QCAMERA_INTERNAL_EVT_CROP_INFO;
+                payload->crop_data = *crop_data;
+                int32_t rc = pme->processEvt(QCAMERA_SM_EVT_EVT_INTERNAL, payload);
+                if (rc != NO_ERROR) {
+                    ALOGE("%s: processEvt crop info failed", __func__);
+                    free(payload);
+                    payload = NULL;
+
+                }
+            } else {
+                ALOGE("%s: No memory for prep_snapshot qcamera_sm_internal_evt_payload_t",
+                    __func__);
+            }
+        }
+    }
+
+    IF_META_AVAILABLE(int32_t, prep_snapshot_done_state,
+            CAM_INTF_META_PREP_SNAPSHOT_DONE, pMetaData) {
+        qcamera_sm_internal_evt_payload_t *payload =
+        (qcamera_sm_internal_evt_payload_t *)malloc(sizeof(qcamera_sm_internal_evt_payload_t));
+        if (NULL != payload) {
+            memset(payload, 0, sizeof(qcamera_sm_internal_evt_payload_t));
+            payload->evt_type = QCAMERA_INTERNAL_EVT_PREP_SNAPSHOT_DONE;
+            payload->prep_snapshot_state = (cam_prep_snapshot_state_t)*prep_snapshot_done_state;
+            int32_t rc = pme->processEvt(QCAMERA_SM_EVT_EVT_INTERNAL, payload);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: processEvt prep_snapshot failed", __func__);
+                free(payload);
+                payload = NULL;
+
+            }
+        } else {
+            ALOGE("%s: No memory for prep_snapshot qcamera_sm_internal_evt_payload_t", __func__);
+        }
+    }
+
+    IF_META_AVAILABLE(cam_asd_hdr_scene_data_t, hdr_scene_data,
+            CAM_INTF_META_ASD_HDR_SCENE_DATA, pMetaData) {
+        CDBG_HIGH("%s: hdr_scene_data: %d %f\n", __func__,
+                hdr_scene_data->is_hdr_scene, hdr_scene_data->hdr_confidence);
+        //Handle this HDR metadata only if capture is not in progress
+        if (!pme->m_stateMachine.isCaptureRunning()) {
+            qcamera_sm_internal_evt_payload_t *payload =
+                    (qcamera_sm_internal_evt_payload_t *)
+                    malloc(sizeof(qcamera_sm_internal_evt_payload_t));
+            if (NULL != payload) {
+                memset(payload, 0, sizeof(qcamera_sm_internal_evt_payload_t));
+                payload->evt_type = QCAMERA_INTERNAL_EVT_HDR_UPDATE;
+                payload->hdr_data = *hdr_scene_data;
+                int32_t rc = pme->processEvt(QCAMERA_SM_EVT_EVT_INTERNAL, payload);
+                if (rc != NO_ERROR) {
+                    ALOGE("%s: processEvt hdr update failed", __func__);
+                    free(payload);
+                    payload = NULL;
+                }
+            } else {
+                ALOGE("%s: No memory for hdr update qcamera_sm_internal_evt_payload_t",
+                        __func__);
+            }
+        }
+    }
+
+    IF_META_AVAILABLE(int32_t, scene, CAM_INTF_META_ASD_SCENE_TYPE, pMetaData) {
+        qcamera_sm_internal_evt_payload_t *payload =
+            (qcamera_sm_internal_evt_payload_t *)malloc(sizeof(qcamera_sm_internal_evt_payload_t));
+        if (NULL != payload) {
+            memset(payload, 0, sizeof(qcamera_sm_internal_evt_payload_t));
+            payload->evt_type = QCAMERA_INTERNAL_EVT_ASD_UPDATE;
+            payload->asd_data = (cam_auto_scene_t)*scene;
+            int32_t rc = pme->processEvt(QCAMERA_SM_EVT_EVT_INTERNAL, payload);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: processEvt asd_update failed", __func__);
+                free(payload);
+                payload = NULL;
+            }
+        } else {
+            ALOGE("%s: No memory for asd_update qcamera_sm_internal_evt_payload_t", __func__);
+        }
+    }
+
+    IF_META_AVAILABLE(cam_awb_params_t, awb_params, CAM_INTF_META_AWB_INFO, pMetaData) {
+        CDBG_HIGH("%s, metadata for awb params.", __func__);
+        qcamera_sm_internal_evt_payload_t *payload =
+                (qcamera_sm_internal_evt_payload_t *)
+                malloc(sizeof(qcamera_sm_internal_evt_payload_t));
+        if (NULL != payload) {
+            memset(payload, 0, sizeof(qcamera_sm_internal_evt_payload_t));
+            payload->evt_type = QCAMERA_INTERNAL_EVT_AWB_UPDATE;
+            payload->awb_data = *awb_params;
+            int32_t rc = pme->processEvt(QCAMERA_SM_EVT_EVT_INTERNAL, payload);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: processEvt awb_update failed", __func__);
+                free(payload);
+                payload = NULL;
+            }
+        } else {
+            ALOGE("%s: No memory for awb_update qcamera_sm_internal_evt_payload_t", __func__);
+        }
+    }
+
+    IF_META_AVAILABLE(uint32_t, flash_mode, CAM_INTF_META_FLASH_MODE, pMetaData) {
+        pme->mExifParams.sensor_params.flash_mode = (cam_flash_mode_t)*flash_mode;
+    }
+
+    IF_META_AVAILABLE(int32_t, flash_state, CAM_INTF_META_FLASH_STATE, pMetaData) {
+        pme->mExifParams.sensor_params.flash_state = (cam_flash_state_t) *flash_state;
+    }
+
+    IF_META_AVAILABLE(float, aperture_value, CAM_INTF_META_LENS_APERTURE, pMetaData) {
+        pme->mExifParams.sensor_params.aperture_value = *aperture_value;
+    }
+
+    IF_META_AVAILABLE(cam_3a_params_t, ae_params, CAM_INTF_META_AEC_INFO, pMetaData) {
+        pme->mExifParams.cam_3a_params = *ae_params;
+        pme->mExifParams.cam_3a_params_valid = TRUE;
+        pme->mFlashNeeded = ae_params->flash_needed;
+        pme->mExifParams.cam_3a_params.brightness = (float) pme->mParameters.getBrightness();
+        qcamera_sm_internal_evt_payload_t *payload =
+                (qcamera_sm_internal_evt_payload_t *)
+                malloc(sizeof(qcamera_sm_internal_evt_payload_t));
+        if (NULL != payload) {
+            memset(payload, 0, sizeof(qcamera_sm_internal_evt_payload_t));
+            payload->evt_type = QCAMERA_INTERNAL_EVT_AE_UPDATE;
+            payload->ae_data = *ae_params;
+            int32_t rc = pme->processEvt(QCAMERA_SM_EVT_EVT_INTERNAL, payload);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: processEvt ae_update failed", __func__);
+                free(payload);
+                payload = NULL;
+            }
+        } else {
+            ALOGE("%s: No memory for ae_update qcamera_sm_internal_evt_payload_t", __func__);
+        }
+    }
+
+    IF_META_AVAILABLE(int32_t, wb_mode, CAM_INTF_PARM_WHITE_BALANCE, pMetaData) {
+        pme->mExifParams.cam_3a_params.wb_mode = (cam_wb_mode_type) *wb_mode;
+    }
+
+    IF_META_AVAILABLE(cam_sensor_params_t, sensor_params, CAM_INTF_META_SENSOR_INFO, pMetaData) {
+        pme->mExifParams.sensor_params = *sensor_params;
+    }
+
+    IF_META_AVAILABLE(cam_ae_exif_debug_t, ae_exif_debug_params,
+            CAM_INTF_META_EXIF_DEBUG_AE, pMetaData) {
+        pme->mExifParams.ae_debug_params = *ae_exif_debug_params;
+        pme->mExifParams.ae_debug_params_valid = TRUE;
+    }
+
+    IF_META_AVAILABLE(cam_awb_exif_debug_t, awb_exif_debug_params,
+            CAM_INTF_META_EXIF_DEBUG_AWB, pMetaData) {
+        pme->mExifParams.awb_debug_params = *awb_exif_debug_params;
+        pme->mExifParams.awb_debug_params_valid = TRUE;
+    }
+
+    IF_META_AVAILABLE(cam_af_exif_debug_t, af_exif_debug_params,
+            CAM_INTF_META_EXIF_DEBUG_AF, pMetaData) {
+        pme->mExifParams.af_debug_params = *af_exif_debug_params;
+        pme->mExifParams.af_debug_params_valid = TRUE;
+    }
+
+    IF_META_AVAILABLE(cam_asd_exif_debug_t, asd_exif_debug_params,
+            CAM_INTF_META_EXIF_DEBUG_ASD, pMetaData) {
+        pme->mExifParams.asd_debug_params = *asd_exif_debug_params;
+        pme->mExifParams.asd_debug_params_valid = TRUE;
+    }
+
+    IF_META_AVAILABLE(cam_stats_buffer_exif_debug_t, stats_exif_debug_params,
+            CAM_INTF_META_EXIF_DEBUG_STATS, pMetaData) {
+        pme->mExifParams.stats_debug_params = *stats_exif_debug_params;
+        pme->mExifParams.stats_debug_params_valid = TRUE;
+    }
+
+    IF_META_AVAILABLE(uint32_t, led_mode, CAM_INTF_META_LED_MODE_OVERRIDE, pMetaData) {
+        qcamera_sm_internal_evt_payload_t *payload =
+                (qcamera_sm_internal_evt_payload_t *)
+                malloc(sizeof(qcamera_sm_internal_evt_payload_t));
+        if (NULL != payload) {
+            memset(payload, 0, sizeof(qcamera_sm_internal_evt_payload_t));
+            payload->evt_type = QCAMERA_INTERNAL_EVT_LED_MODE_OVERRIDE;
+            payload->led_data = (cam_flash_mode_t)*led_mode;
+            int32_t rc = pme->processEvt(QCAMERA_SM_EVT_EVT_INTERNAL, payload);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: processEvt led mode override failed", __func__);
+                free(payload);
+                payload = NULL;
+            }
+        } else {
+            ALOGE("%s: No memory for focus qcamera_sm_internal_evt_payload_t", __func__);
+        }
+    }
+
+    cam_edge_application_t edge_application;
+    memset(&edge_application, 0x00, sizeof(cam_edge_application_t));
+    edge_application.sharpness = pme->mParameters.getSharpness();
+    if (edge_application.sharpness != 0) {
+        edge_application.edge_mode = CAM_EDGE_MODE_FAST;
+    } else {
+        edge_application.edge_mode = CAM_EDGE_MODE_OFF;
+    }
+    ADD_SET_PARAM_ENTRY_TO_BATCH(pMetaData, CAM_INTF_META_EDGE_MODE, edge_application);
+
+    IF_META_AVAILABLE(cam_focus_pos_info_t, cur_pos_info,
+            CAM_INTF_META_FOCUS_POSITION, pMetaData) {
+        qcamera_sm_internal_evt_payload_t *payload =
+            (qcamera_sm_internal_evt_payload_t *)malloc(sizeof(qcamera_sm_internal_evt_payload_t));
+        if (NULL != payload) {
+            memset(payload, 0, sizeof(qcamera_sm_internal_evt_payload_t));
+            payload->evt_type = QCAMERA_INTERNAL_EVT_FOCUS_POS_UPDATE;
+            payload->focus_pos = *cur_pos_info;
+            int32_t rc = pme->processEvt(QCAMERA_SM_EVT_EVT_INTERNAL, payload);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: processEvt focus_pos_update failed", __func__);
+                free(payload);
+                payload = NULL;
+            }
+        } else {
+            ALOGE("%s: No memory for focus_pos_update qcamera_sm_internal_evt_payload_t", __func__);
+        }
+    }
+
+    stream->bufDone(frame->buf_idx);
+    free(super_frame);
+
+    CDBG("[KPI Perf] %s : END", __func__);
+}
+
+/*===========================================================================
+ * FUNCTION   : reprocess_stream_cb_routine
+ *
+ * DESCRIPTION: helper function to handle reprocess frame from reprocess stream
+ *              (after reprocess, e.g., ZSL snapshot frame after WNR if
+ *              WNR is enabled)
+ *
+ * PARAMETERS :
+ *   @super_frame : received super buffer
+ *   @stream      : stream object
+ *   @userdata    : user data ptr
+ *
+ * RETURN    : None
+ *
+ * NOTE      : caller passes the ownership of super_frame, it's our
+ *             responsibility to free super_frame once it's done. In this
+ *             case, reprocessed frame need to be passed to postprocessor
+ *             for jpeg encoding.
+ *==========================================================================*/
+void QCamera2HardwareInterface::reprocess_stream_cb_routine(mm_camera_super_buf_t * super_frame,
+                                                            QCameraStream * /*stream*/,
+                                                            void * userdata)
+{
+    ATRACE_CALL();
+    CDBG_HIGH("[KPI Perf] %s: E", __func__);
+    QCamera2HardwareInterface *pme = (QCamera2HardwareInterface *)userdata;
+    if (pme == NULL ||
+        pme->mCameraHandle == NULL ||
+        pme->mCameraHandle->camera_handle != super_frame->camera_handle){
+        ALOGE("%s: camera obj not valid", __func__);
+        // simply free super frame
+        free(super_frame);
+        return;
+    }
+
+    pme->m_postprocessor.processPPData(super_frame);
+
+    CDBG_HIGH("[KPI Perf] %s: X", __func__);
+}
+
+/*===========================================================================
+ * FUNCTION   : dumpJpegToFile
+ *
+ * DESCRIPTION: helper function to dump jpeg into file for debug purpose.
+ *
+ * PARAMETERS :
+ *    @data : data ptr
+ *    @size : length of data buffer
+ *    @index : identifier for data
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCamera2HardwareInterface::dumpJpegToFile(const void *data,
+        size_t size, uint32_t index)
+{
+    char value[PROPERTY_VALUE_MAX];
+    property_get("persist.camera.dumpimg", value, "0");
+    uint32_t enabled = (uint32_t) atoi(value);
+    uint32_t frm_num = 0;
+    uint32_t skip_mode = 0;
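+    /* persist.camera.dumpimg encodes the dump configuration:
+     * bits 31..16 = number of frames to dump, bits 15..8 = skip interval,
+     * low bits = dump-type mask (QCAMERA_DUMP_FRM_*). */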
+
+    char buf[32];
+    cam_dimension_t dim;
+    memset(buf, 0, sizeof(buf));
+    memset(&dim, 0, sizeof(dim));
+
+    if(((enabled & QCAMERA_DUMP_FRM_JPEG) && data) ||
+        ((true == m_bIntJpegEvtPending) && data)) {
+        frm_num = ((enabled & 0xffff0000) >> 16);
+        if(frm_num == 0) {
+            frm_num = 10; //default 10 frames
+        }
+        if(frm_num > 256) {
+            frm_num = 256; //256 buffers cycle around
+        }
+        skip_mode = ((enabled & 0x0000ff00) >> 8);
+        if(skip_mode == 0) {
+            skip_mode = 1; //no-skip
+        }
+
+        if( mDumpSkipCnt % skip_mode == 0) {
+            if((frm_num == 256) && (mDumpFrmCnt >= frm_num)) {
+                // reset frame count if cycling
+                mDumpFrmCnt = 0;
+            }
+            if (mDumpFrmCnt <= frm_num) {
+                snprintf(buf, sizeof(buf), QCAMERA_DUMP_FRM_LOCATION "%d_%d.jpg",
+                        mDumpFrmCnt, index);
+                if (true == m_bIntJpegEvtPending) {
+                    strlcpy(m_BackendFileName, buf, sizeof(buf));
+                    mBackendFileSize = size;
+                }
+
+                int file_fd = open(buf, O_RDWR | O_CREAT, 0777);
+                if (file_fd >= 0) {
+                    ssize_t written_len = write(file_fd, data, size);
+                    fchmod(file_fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+                    CDBG_HIGH("%s: written number of bytes %zd\n",
+                            __func__, written_len);
+                    close(file_fd);
+                } else {
+                    ALOGE("%s: fail t open file for image dumping", __func__);
+                }
+                if (false == m_bIntJpegEvtPending) {
+                    mDumpFrmCnt++;
+                }
+            }
+        }
+        mDumpSkipCnt++;
+    }
+}
+
+
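+/*===========================================================================
+ * FUNCTION   : dumpMetadataToFile
+ *
+ * DESCRIPTION: helper function to dump tuning metadata into a file for debug
+ *              purpose.
+ *
+ * PARAMETERS :
+ *    @stream : stream object
+ *    @frame  : metadata frame buffer
+ *    @type   : string appended to the dump file name (e.g. "Snapshot", "Video")
+ *
+ * RETURN     : None
+ *==========================================================================*/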
+void QCamera2HardwareInterface::dumpMetadataToFile(QCameraStream *stream,
+                                                   mm_camera_buf_def_t *frame,char *type)
+{
+    char value[PROPERTY_VALUE_MAX];
+    uint32_t frm_num = 0;
+    metadata_buffer_t *metadata = (metadata_buffer_t *)frame->buffer;
+    property_get("persist.camera.dumpmetadata", value, "0");
+    uint32_t enabled = (uint32_t) atoi(value);
+    if (stream == NULL) {
+        CDBG_HIGH("No op");
+        return;
+    }
+
+    uint32_t dumpFrmCnt = stream->mDumpMetaFrame;
+    if(enabled){
+        frm_num = ((enabled & 0xffff0000) >> 16);
+        if (frm_num == 0) {
+            frm_num = 10; //default 10 frames
+        }
+        if (frm_num > 256) {
+            frm_num = 256; //256 buffers cycle around
+        }
+        if ((frm_num == 256) && (dumpFrmCnt >= frm_num)) {
+            // reset frame count if cycling
+            dumpFrmCnt = 0;
+        }
+        CDBG_HIGH("dumpFrmCnt= %u, frm_num = %u", dumpFrmCnt, frm_num);
+        if (dumpFrmCnt < frm_num) {
+            char timeBuf[128];
+            char buf[32];
+            memset(buf, 0, sizeof(buf));
+            memset(timeBuf, 0, sizeof(timeBuf));
+            time_t current_time;
+            struct tm * timeinfo;
+            time (&current_time);
+            timeinfo = localtime (&current_time);
+            if (NULL != timeinfo) {
+                strftime(timeBuf, sizeof(timeBuf),
+                        QCAMERA_DUMP_FRM_LOCATION "%Y%m%d%H%M%S", timeinfo);
+            }
+            String8 filePath(timeBuf);
+            snprintf(buf, sizeof(buf), "%um_%s_%d.bin", dumpFrmCnt, type, frame->frame_idx);
+            filePath.append(buf);
+            int file_fd = open(filePath.string(), O_RDWR | O_CREAT, 0777);
+            if (file_fd >= 0) {
+                ssize_t written_len = 0;
+                metadata->tuning_params.tuning_data_version = TUNING_DATA_VERSION;
+                void *data = (void *)((uint8_t *)&metadata->tuning_params.tuning_data_version);
+                written_len += write(file_fd, data, sizeof(uint32_t));
+                data = (void *)((uint8_t *)&metadata->tuning_params.tuning_sensor_data_size);
+                CDBG_HIGH("tuning_sensor_data_size %d",(int)(*(int *)data));
+                written_len += write(file_fd, data, sizeof(uint32_t));
+                data = (void *)((uint8_t *)&metadata->tuning_params.tuning_vfe_data_size);
+                CDBG_HIGH("tuning_vfe_data_size %d",(int)(*(int *)data));
+                written_len += write(file_fd, data, sizeof(uint32_t));
+                data = (void *)((uint8_t *)&metadata->tuning_params.tuning_cpp_data_size);
+                CDBG_HIGH("tuning_cpp_data_size %d",(int)(*(int *)data));
+                written_len += write(file_fd, data, sizeof(uint32_t));
+                data = (void *)((uint8_t *)&metadata->tuning_params.tuning_cac_data_size);
+                CDBG_HIGH("tuning_cac_data_size %d",(int)(*(int *)data));
+                written_len += write(file_fd, data, sizeof(uint32_t));
+                data = (void *)((uint8_t *)&metadata->tuning_params.tuning_cac_data_size2);
+                CDBG_HIGH("%s < skrajago >tuning_cac_data_size %d",__func__,(int)(*(int *)data));
+                written_len += write(file_fd, data, sizeof(uint32_t));
+                size_t total_size = metadata->tuning_params.tuning_sensor_data_size;
+                data = (void *)((uint8_t *)&metadata->tuning_params.data);
+                written_len += write(file_fd, data, total_size);
+                total_size = metadata->tuning_params.tuning_vfe_data_size;
+                data = (void *)((uint8_t *)&metadata->tuning_params.data[TUNING_VFE_DATA_OFFSET]);
+                written_len += write(file_fd, data, total_size);
+                total_size = metadata->tuning_params.tuning_cpp_data_size;
+                data = (void *)((uint8_t *)&metadata->tuning_params.data[TUNING_CPP_DATA_OFFSET]);
+                written_len += write(file_fd, data, total_size);
+                total_size = metadata->tuning_params.tuning_cac_data_size;
+                data = (void *)((uint8_t *)&metadata->tuning_params.data[TUNING_CAC_DATA_OFFSET]);
+                written_len += write(file_fd, data, total_size);
+                close(file_fd);
+            } else {
+                ALOGE("%s: failed to open file for metadata dumping", __func__);
+            }
+            dumpFrmCnt++;
+        }
+    }
+    stream->mDumpMetaFrame = dumpFrmCnt;
+}
+/*===========================================================================
+ * FUNCTION   : dumpFrameToFile
+ *
+ * DESCRIPTION: helper function to dump frame into file for debug purpose.
+ *
+ * PARAMETERS :
+ *    @data : data ptr
+ *    @size : length of data buffer
+ *    @index : identifier for data
+ *    @dump_type : type of the frame to be dumped. Only such
+ *                 dump type is enabled, the frame will be
+ *                 dumped into a file.
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCamera2HardwareInterface::dumpFrameToFile(QCameraStream *stream,
+        mm_camera_buf_def_t *frame, uint32_t dump_type)
+{
+    char value[PROPERTY_VALUE_MAX];
+    property_get("persist.camera.dumpimg", value, "0");
+    uint32_t enabled = (uint32_t) atoi(value);
+    uint32_t frm_num = 0;
+    uint32_t skip_mode = 0;
+
+    if (NULL == stream) {
+        ALOGE("%s stream object is null", __func__);
+        return;
+    }
+
+    uint32_t dumpFrmCnt = stream->mDumpFrame;
+
+    if (true == m_bIntRawEvtPending) {
+        enabled = QCAMERA_DUMP_FRM_RAW;
+    }
+
+    if((enabled & QCAMERA_DUMP_FRM_MASK_ALL)) {
+        if((enabled & dump_type) && stream && frame) {
+            frm_num = ((enabled & 0xffff0000) >> 16);
+            if(frm_num == 0) {
+                frm_num = 10; //default 10 frames
+            }
+            if(frm_num > 256) {
+                frm_num = 256; //256 buffers cycle around
+            }
+            skip_mode = ((enabled & 0x0000ff00) >> 8);
+            if(skip_mode == 0) {
+                skip_mode = 1; //no-skip
+            }
+            if(stream->mDumpSkipCnt == 0)
+                stream->mDumpSkipCnt = 1;
+
+            if( stream->mDumpSkipCnt % skip_mode == 0) {
+                if((frm_num == 256) && (dumpFrmCnt >= frm_num)) {
+                    // reset frame count if cycling
+                    dumpFrmCnt = 0;
+                }
+                if (dumpFrmCnt <= frm_num) {
+                    char buf[32];
+                    char timeBuf[128];
+                    time_t current_time;
+                    struct tm * timeinfo;
+
+                    memset(timeBuf, 0, sizeof(timeBuf));
+
+                    time (&current_time);
+                    timeinfo = localtime (&current_time);
+                    memset(buf, 0, sizeof(buf));
+
+                    cam_dimension_t dim;
+                    memset(&dim, 0, sizeof(dim));
+                    stream->getFrameDimension(dim);
+
+                    cam_frame_len_offset_t offset;
+                    memset(&offset, 0, sizeof(cam_frame_len_offset_t));
+                    stream->getFrameOffset(offset);
+
+                    if (NULL != timeinfo) {
+                        strftime(timeBuf, sizeof(timeBuf),
+                                QCAMERA_DUMP_FRM_LOCATION "%Y%m%d%H%M%S", timeinfo);
+                    }
+                    String8 filePath(timeBuf);
+                    switch (dump_type) {
+                    case QCAMERA_DUMP_FRM_PREVIEW:
+                        {
+                            snprintf(buf, sizeof(buf), "%dp_%dx%d_%d.yuv",
+                                    dumpFrmCnt, dim.width, dim.height, frame->frame_idx);
+                        }
+                        break;
+                    case QCAMERA_DUMP_FRM_THUMBNAIL:
+                        {
+                            snprintf(buf, sizeof(buf), "%dt_%dx%d_%d.yuv",
+                                    dumpFrmCnt, dim.width, dim.height, frame->frame_idx);
+                        }
+                        break;
+                    case QCAMERA_DUMP_FRM_SNAPSHOT:
+                        {
+                            mParameters.getStreamDimension(CAM_STREAM_TYPE_SNAPSHOT, dim);
+                            snprintf(buf, sizeof(buf), "%ds_%dx%d_%d.yuv",
+                                    dumpFrmCnt, dim.width, dim.height, frame->frame_idx);
+                        }
+                        break;
+                    case QCAMERA_DUMP_FRM_VIDEO:
+                        {
+                            snprintf(buf, sizeof(buf), "%dv_%dx%d_%d.yuv",
+                                    dumpFrmCnt, dim.width, dim.height, frame->frame_idx);
+                        }
+                        break;
+                    case QCAMERA_DUMP_FRM_RAW:
+                        {
+                            mParameters.getStreamDimension(CAM_STREAM_TYPE_RAW, dim);
+                            snprintf(buf, sizeof(buf), "%dr_%dx%d_%d.raw",
+                                    dumpFrmCnt, dim.width, dim.height, frame->frame_idx);
+                        }
+                        break;
+                    case QCAMERA_DUMP_FRM_JPEG:
+                        {
+                            mParameters.getStreamDimension(CAM_STREAM_TYPE_SNAPSHOT, dim);
+                            snprintf(buf, sizeof(buf), "%dj_%dx%d_%d.yuv",
+                                    dumpFrmCnt, dim.width, dim.height, frame->frame_idx);
+                        }
+                        break;
+                    default:
+                        ALOGE("%s: Not supported for dumping stream type %d",
+                              __func__, dump_type);
+                        return;
+                    }
+
+                    filePath.append(buf);
+                    int file_fd = open(filePath.string(), O_RDWR | O_CREAT, 0777);
+                    ssize_t written_len = 0;
+                    if (file_fd >= 0) {
+                        void *data = NULL;
+
+                        fchmod(file_fd, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+                        for (uint32_t i = 0; i < offset.num_planes; i++) {
+                            uint32_t index = offset.mp[i].offset;
+                            if (i > 0) {
+                                index += offset.mp[i-1].len;
+                            }
+                            for (int j = 0; j < offset.mp[i].height; j++) {
+                                data = (void *)((uint8_t *)frame->buffer + index);
+                                written_len += write(file_fd, data,
+                                        (size_t)offset.mp[i].width);
+                                index += (uint32_t)offset.mp[i].stride;
+                            }
+                        }
+
+                        CDBG_HIGH("%s: written number of bytes %ld\n",
+                            __func__, written_len);
+                        close(file_fd);
+                    } else {
+                        ALOGE("%s: failed to open file for image dumping", __func__);
+                    }
+                    if (true == m_bIntRawEvtPending) {
+                        strlcpy(m_BackendFileName, filePath.string(), QCAMERA_MAX_FILEPATH_LENGTH);
+                        mBackendFileSize = (size_t)written_len;
+                    } else {
+                        dumpFrmCnt++;
+                    }
+                }
+            }
+            stream->mDumpSkipCnt++;
+        }
+    } else {
+        dumpFrmCnt = 0;
+    }
+    stream->mDumpFrame = dumpFrmCnt;
+}
+
+/*===========================================================================
+ * FUNCTION   : debugShowVideoFPS
+ *
+ * DESCRIPTION: helper function to log video frame FPS for debug purpose.
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCamera2HardwareInterface::debugShowVideoFPS()
+{
+    mVFrameCount++;
+    nsecs_t now = systemTime();
+    nsecs_t diff = now - mVLastFpsTime;
+    if (diff > ms2ns(250)) {
+        mVFps = (((double)(mVFrameCount - mVLastFrameCount)) *
+                (double)(s2ns(1))) / (double)diff;
+        CDBG_HIGH("Video Frames Per Second: %.4f Cam ID = %d", mVFps, mCameraId);
+        mVLastFpsTime = now;
+        mVLastFrameCount = mVFrameCount;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : debugShowPreviewFPS
+ *
+ * DESCRIPTION: helper function to log preview frame FPS for debug purpose.
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCamera2HardwareInterface::debugShowPreviewFPS()
+{
+    mPFrameCount++;
+    nsecs_t now = systemTime();
+    nsecs_t diff = now - mPLastFpsTime;
+    if (diff > ms2ns(250)) {
+        mPFps = (((double)(mPFrameCount - mPLastFrameCount)) *
+                (double)(s2ns(1))) / (double)diff;
+        CDBG_HIGH("[KPI Perf] %s: PROFILE_PREVIEW_FRAMES_PER_SECOND : %.4f Cam ID = %d",
+                __func__, mPFps, mCameraId);
+        mPLastFpsTime = now;
+        mPLastFrameCount = mPFrameCount;
+    }
+}
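+
+// Both FPS helpers above use the same formula: once more than 250 ms have
+// elapsed since the last report, fps = frames_since_last_report * 1e9 / diff_ns.
+// For example, 15 frames counted over a 500 ms window would be logged as 30 fps.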
+
+/*===========================================================================
+ * FUNCTION   : ~QCameraCbNotifier
+ *
+ * DESCRIPTION: Destructor for exiting the callback context.
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraCbNotifier::~QCameraCbNotifier()
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : exit
+ *
+ * DESCRIPTION: exit notify thread.
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCameraCbNotifier::exit()
+{
+    mActive = false;
+    mProcTh.exit();
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseNotifications
+ *
+ * DESCRIPTION: callback for releasing data stored in the callback queue.
+ *
+ * PARAMETERS :
+ *   @data      : data to be released
+ *   @user_data : context data
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCameraCbNotifier::releaseNotifications(void *data, void *user_data)
+{
+    qcamera_callback_argm_t *arg = ( qcamera_callback_argm_t * ) data;
+
+    if ( ( NULL != arg ) && ( NULL != user_data ) ) {
+        if ( arg->release_cb ) {
+            arg->release_cb(arg->user_data, arg->cookie, FAILED_TRANSACTION);
+        }
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : matchSnapshotNotifications
+ *
+ * DESCRIPTION: matches snapshot data callbacks
+ *
+ * PARAMETERS :
+ *   @data      : data to match
+ *   @user_data : context data
+ *
+ * RETURN     : bool match
+ *              true - match found
+ *              false- match not found
+ *==========================================================================*/
+bool QCameraCbNotifier::matchSnapshotNotifications(void *data,
+                                                   void */*user_data*/)
+{
+    qcamera_callback_argm_t *arg = ( qcamera_callback_argm_t * ) data;
+    if ( NULL != arg ) {
+        if ( QCAMERA_DATA_SNAPSHOT_CALLBACK == arg->cb_type ) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+/*===========================================================================
+ * FUNCTION   : matchPreviewNotifications
+ *
+ * DESCRIPTION: matches preview data callbacks
+ *
+ * PARAMETERS :
+ *   @data      : data to match
+ *   @user_data : context data
+ *
+ * RETURN     : bool match
+ *              true - match found
+ *              false- match not found
+ *==========================================================================*/
+bool QCameraCbNotifier::matchPreviewNotifications(void *data,
+        void */*user_data*/)
+{
+    qcamera_callback_argm_t *arg = ( qcamera_callback_argm_t * ) data;
+    if (NULL != arg) {
+        if ((QCAMERA_DATA_CALLBACK == arg->cb_type) &&
+                (CAMERA_MSG_PREVIEW_FRAME == arg->msg_type)) {
+            return true;
+        }
+    }
+
+    return false;
+}
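+
+// Both matchers above are predicates handed to mDataQ.flushNodes(): the
+// CAMERA_CMD_TYPE_STOP_DATA_PROC path drops any queued snapshot callbacks and
+// flushPreviewNotifications() drops queued preview-frame callbacks (see their
+// call sites further down in this file).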
+
+/*===========================================================================
+ * FUNCTION   : cbNotifyRoutine
+ *
+ * DESCRIPTION: callback thread which interfaces with the upper layers
+ *              given input commands.
+ *
+ * PARAMETERS :
+ *   @data    : context data
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void * QCameraCbNotifier::cbNotifyRoutine(void * data)
+{
+    int running = 1;
+    int ret;
+    QCameraCbNotifier *pme = (QCameraCbNotifier *)data;
+    QCameraCmdThread *cmdThread = &pme->mProcTh;
+    cmdThread->setName("CAM_cbNotify");
+    uint8_t isSnapshotActive = FALSE;
+    bool longShotEnabled = false;
+    uint32_t numOfSnapshotExpected = 0;
+    uint32_t numOfSnapshotRcvd = 0;
+    int32_t cbStatus = NO_ERROR;
+
+    CDBG("%s: E", __func__);
+    do {
+        do {
+            ret = cam_sem_wait(&cmdThread->cmd_sem);
+            if (ret != 0 && errno != EINVAL) {
+                CDBG("%s: cam_sem_wait error (%s)",
+                           __func__, strerror(errno));
+                return NULL;
+            }
+        } while (ret != 0);
+
+        camera_cmd_type_t cmd = cmdThread->getCmd();
+        CDBG("%s: get cmd %d", __func__, cmd);
+        switch (cmd) {
+        case CAMERA_CMD_TYPE_START_DATA_PROC:
+            {
+                isSnapshotActive = TRUE;
+                numOfSnapshotExpected = pme->mParent->numOfSnapshotsExpected();
+                longShotEnabled = pme->mParent->isLongshotEnabled();
+                ALOGI("%s: Num Snapshots Expected = %d",
+                  __func__, numOfSnapshotExpected);
+                numOfSnapshotRcvd = 0;
+            }
+            break;
+        case CAMERA_CMD_TYPE_STOP_DATA_PROC:
+            {
+                pme->mDataQ.flushNodes(matchSnapshotNotifications);
+                isSnapshotActive = FALSE;
+
+                numOfSnapshotExpected = 0;
+                numOfSnapshotRcvd = 0;
+            }
+            break;
+        case CAMERA_CMD_TYPE_DO_NEXT_JOB:
+            {
+                qcamera_callback_argm_t *cb =
+                    (qcamera_callback_argm_t *)pme->mDataQ.dequeue();
+                cbStatus = NO_ERROR;
+                if (NULL != cb) {
+                    CDBG("%s: cb type %d received",
+                          __func__,
+                          cb->cb_type);
+
+                    if (pme->mParent->msgTypeEnabledWithLock(cb->msg_type)) {
+                        switch (cb->cb_type) {
+                        case QCAMERA_NOTIFY_CALLBACK:
+                            {
+                                if (cb->msg_type == CAMERA_MSG_FOCUS) {
+                                    ATRACE_INT("Camera:AutoFocus", 0);
+                                    CDBG_HIGH("[KPI Perf] %s : PROFILE_SENDING_FOCUS_EVT_TO APP",
+                                        __func__);
+                                }
+                                if (pme->mNotifyCb) {
+                                    pme->mNotifyCb(cb->msg_type,
+                                                  cb->ext1,
+                                                  cb->ext2,
+                                                  pme->mCallbackCookie);
+                                } else {
+                                    ALOGE("%s : notify callback not set!",
+                                          __func__);
+                                }
+                            }
+                            break;
+                        case QCAMERA_DATA_CALLBACK:
+                            {
+                                if (pme->mDataCb) {
+                                    pme->mDataCb(cb->msg_type,
+                                                 cb->data,
+                                                 cb->index,
+                                                 cb->metadata,
+                                                 pme->mCallbackCookie);
+                                } else {
+                                    ALOGE("%s : data callback not set!",
+                                          __func__);
+                                }
+                            }
+                            break;
+                        case QCAMERA_DATA_TIMESTAMP_CALLBACK:
+                            {
+                                if(pme->mDataCbTimestamp) {
+                                    pme->mDataCbTimestamp(cb->timestamp,
+                                                          cb->msg_type,
+                                                          cb->data,
+                                                          cb->index,
+                                                          pme->mCallbackCookie);
+                                } else {
+                                    ALOGE("%s:data cb with tmp not set!",
+                                          __func__);
+                                }
+                            }
+                            break;
+                        case QCAMERA_DATA_SNAPSHOT_CALLBACK:
+                            {
+                                if (TRUE == isSnapshotActive && pme->mDataCb ) {
+                                    if (!longShotEnabled) {
+                                        numOfSnapshotRcvd++;
+                                        ALOGI("%s: [ZSL Retro] Num Snapshots Received = %d", __func__,
+                                                numOfSnapshotRcvd);
+                                        if (numOfSnapshotExpected > 0 &&
+                                           (numOfSnapshotExpected == numOfSnapshotRcvd)) {
+                                            ALOGI("%s: [ZSL Retro] Expected snapshot received = %d",
+                                                    __func__, numOfSnapshotRcvd);
+                                            // notify HWI that snapshot is done
+                                            pme->mParent->processSyncEvt(QCAMERA_SM_EVT_SNAPSHOT_DONE,
+                                                                         NULL);
+                                        }
+                                    }
+                                    pme->mDataCb(cb->msg_type,
+                                                 cb->data,
+                                                 cb->index,
+                                                 cb->metadata,
+                                                 pme->mCallbackCookie);
+                                }
+                            }
+                            break;
+                        default:
+                            {
+                                ALOGE("%s : invalid cb type %d",
+                                      __func__,
+                                      cb->cb_type);
+                                cbStatus = BAD_VALUE;
+                            }
+                            break;
+                        };
+                    } else {
+                        ALOGE("%s : cb message type %d not enabled!",
+                              __func__,
+                              cb->msg_type);
+                        cbStatus = INVALID_OPERATION;
+                    }
+                    if ( cb->release_cb ) {
+                        cb->release_cb(cb->user_data, cb->cookie, cbStatus);
+                    }
+                    delete cb;
+                } else {
+                    ALOGE("%s: invalid cb type passed", __func__);
+                }
+            }
+            break;
+        case CAMERA_CMD_TYPE_EXIT:
+            {
+                running = 0;
+                pme->mDataQ.flush();
+            }
+            break;
+        default:
+            break;
+        }
+    } while (running);
+    CDBG("%s: X", __func__);
+
+    return NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : notifyCallback
+ *
+ * DESCRIPTION: Enqueues pending callback notifications for the upper layers.
+ *
+ * PARAMETERS :
+ *   @cbArgs  : callback arguments
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraCbNotifier::notifyCallback(qcamera_callback_argm_t &cbArgs)
+{
+    if (!mActive) {
+        ALOGE("%s: notify thread is not active", __func__);
+        return UNKNOWN_ERROR;
+    }
+
+    qcamera_callback_argm_t *cbArg = new qcamera_callback_argm_t();
+    if (NULL == cbArg) {
+        ALOGE("%s: no mem for qcamera_callback_argm_t", __func__);
+        return NO_MEMORY;
+    }
+    memset(cbArg, 0, sizeof(qcamera_callback_argm_t));
+    *cbArg = cbArgs;
+
+    if (mDataQ.enqueue((void *)cbArg)) {
+        return mProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE);
+    } else {
+        ALOGE("%s: Error adding cb data into queue", __func__);
+        delete cbArg;
+        return UNKNOWN_ERROR;
+    }
+}
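+
+// Ownership note (derived from the code above): notifyCallback() copies the
+// caller's argument struct into a heap-allocated node, so the caller may reuse
+// its qcamera_callback_argm_t right away; the notify thread invokes release_cb
+// (if set) and deletes the node after dispatching the callback (see
+// cbNotifyRoutine above).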
+
+/*===========================================================================
+ * FUNCTION   : setCallbacks
+ *
+ * DESCRIPTION: Initializes the callback functions, which would be used for
+ *              communication with the upper layers and launches the callback
+ *              context in which the callbacks will occur.
+ *
+ * PARAMETERS :
+ *   @notifyCb          : notification callback
+ *   @dataCb            : data callback
+ *   @dataCbTimestamp   : data with timestamp callback
+ *   @callbackCookie    : callback context data
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCameraCbNotifier::setCallbacks(camera_notify_callback notifyCb,
+                                     camera_data_callback dataCb,
+                                     camera_data_timestamp_callback dataCbTimestamp,
+                                     void *callbackCookie)
+{
+    if ( ( NULL == mNotifyCb ) &&
+         ( NULL == mDataCb ) &&
+         ( NULL == mDataCbTimestamp ) &&
+         ( NULL == mCallbackCookie ) ) {
+        mNotifyCb = notifyCb;
+        mDataCb = dataCb;
+        mDataCbTimestamp = dataCbTimestamp;
+        mCallbackCookie = callbackCookie;
+        mActive = true;
+        mProcTh.launch(cbNotifyRoutine, this);
+    } else {
+        ALOGE("%s : Camera callback notifier already initialized!",
+              __func__);
+    }
+}
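+
+// Typical lifecycle, as a sketch of how the methods in this class fit together
+// (not a prescribed API contract): setCallbacks() launches the notify thread
+// exactly once, notifyCallback() then queues work for it, startSnapshots() and
+// stopSnapshots() bracket snapshot bursts, and exit() tears the thread down.
+// A second call to setCallbacks() only logs an error.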
+
+/*===========================================================================
+ * FUNCTION   : flushPreviewNotifications
+ *
+ * DESCRIPTION: flush all pending preview notifications
+ *              from the notifier queue
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraCbNotifier::flushPreviewNotifications()
+{
+    if (!mActive) {
+        ALOGE("%s: notify thread is not active", __func__);
+        return UNKNOWN_ERROR;
+    }
+
+    mDataQ.flushNodes(matchPreviewNotifications);
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : startSnapshots
+ *
+ * DESCRIPTION: Enables snapshot mode
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraCbNotifier::startSnapshots()
+{
+    return mProcTh.sendCmd(CAMERA_CMD_TYPE_START_DATA_PROC, FALSE, TRUE);
+}
+
+/*===========================================================================
+ * FUNCTION   : stopSnapshots
+ *
+ * DESCRIPTION: Disables snapshot processing mode
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCameraCbNotifier::stopSnapshots()
+{
+    mProcTh.sendCmd(CAMERA_CMD_TYPE_STOP_DATA_PROC, FALSE, TRUE);
+}
+
+}; // namespace qcamera
diff --git a/camera/QCamera2/HAL/QCameraAllocator.h b/camera/QCamera2/HAL/QCameraAllocator.h
new file mode 100644
index 0000000..a7036cf
--- /dev/null
+++ b/camera/QCamera2/HAL/QCameraAllocator.h
@@ -0,0 +1,55 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCAMERA_ALLOCATOR__
+#define __QCAMERA_ALLOCATOR__
+
+extern "C" {
+#include <mm_camera_interface.h>
+}
+
+namespace qcamera {
+
+class QCameraMemory;
+class QCameraHeapMemory;
+
+class QCameraAllocator {
+public:
+    virtual QCameraMemory *allocateStreamBuf(cam_stream_type_t stream_type,
+            size_t size, int stride, int scanline, uint8_t &bufferCnt) = 0;
+    virtual int32_t allocateMoreStreamBuf(QCameraMemory *mem_obj,
+            size_t size, uint8_t &bufferCnt) = 0;
+    virtual QCameraHeapMemory *allocateStreamInfoBuf(cam_stream_type_t stream_type) = 0;
+    virtual QCameraHeapMemory *allocateMiscBuf(cam_stream_info_t *streamInfo) = 0;
+    virtual QCameraMemory *allocateStreamUserBuf(cam_stream_info_t *streamInfo) = 0;
+    virtual ~QCameraAllocator() {}
+};
+
+}; /* namespace qcamera */
+#endif /* __QCAMERA_ALLOCATOR__ */
diff --git a/camera/QCamera2/HAL/QCameraChannel.cpp b/camera/QCamera2/HAL/QCameraChannel.cpp
new file mode 100644
index 0000000..3ce8535
--- /dev/null
+++ b/camera/QCamera2/HAL/QCameraChannel.cpp
@@ -0,0 +1,1395 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#define LOG_TAG "QCameraChannel"
+
+#include <utils/Errors.h>
+#include "QCameraParameters.h"
+#include "QCamera2HWI.h"
+#include "QCameraChannel.h"
+
+using namespace android;
+
+namespace qcamera {
+
+/*===========================================================================
+ * FUNCTION   : QCameraChannel
+ *
+ * DESCRIPTION: constructor of QCameraChannel
+ *
+ * PARAMETERS :
+ *   @cam_handle : camera handle
+ *   @cam_ops    : ptr to camera ops table
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraChannel::QCameraChannel(uint32_t cam_handle,
+                               mm_camera_ops_t *cam_ops)
+{
+    m_camHandle = cam_handle;
+    m_camOps = cam_ops;
+    m_bIsActive = false;
+    m_bAllowDynBufAlloc = false;
+
+    m_handle = 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : QCameraChannel
+ *
+ * DESCRIPTION: default constructor of QCameraChannel
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraChannel::QCameraChannel()
+{
+    m_camHandle = 0;
+    m_camOps = NULL;
+    m_bIsActive = false;
+
+    m_handle = 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCameraChannel
+ *
+ * DESCRIPTION: destructor of QCameraChannel
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraChannel::~QCameraChannel()
+{
+    if (m_bIsActive) {
+        stop();
+    }
+    for (size_t i = 0; i < mStreams.size(); i++) {
+        if (mStreams[i] != NULL) {
+                if (m_handle == mStreams[i]->getChannelHandle()) {
+                    delete mStreams[i];
+                }
+        }
+    }
+    mStreams.clear();
+    m_camOps->delete_channel(m_camHandle, m_handle);
+    m_handle = 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : deleteChannel
+ *
+ * DESCRIPTION: deletes a camera channel
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraChannel::deleteChannel()
+{
+    if (m_bIsActive) {
+        stop();
+    }
+    for (size_t i = 0; i < mStreams.size(); i++) {
+        if ((mStreams[i] != NULL) && (m_handle == mStreams[i]->getChannelHandle())) {
+            mStreams[i]->deleteStream();
+        }
+    }
+    m_camOps->delete_channel(m_camHandle, m_handle);
+}
+
+/*===========================================================================
+ * FUNCTION   : init
+ *
+ * DESCRIPTION: initialization of channel
+ *
+ * PARAMETERS :
+ *   @attr    : channel bundle attribute setting
+ *   @dataCB  : data notify callback
+ *   @userData: user data ptr
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraChannel::init(mm_camera_channel_attr_t *attr,
+                             mm_camera_buf_notify_t dataCB,
+                             void *userData)
+{
+    m_handle = m_camOps->add_channel(m_camHandle,
+                                      attr,
+                                      dataCB,
+                                      userData);
+    if (m_handle == 0) {
+        ALOGE("%s: Add channel failed", __func__);
+        return UNKNOWN_ERROR;
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : addStream
+ *
+ * DESCRIPTION: add a stream into channel
+ *
+ * PARAMETERS :
+ *   @allocator      : stream related buffer allocator
+ *   @streamInfoBuf  : ptr to buf that contains stream info
+ *   @miscBuf        : ptr to buf that contains misc buffers
+ *   @minStreamBufNum: number of stream buffers needed
+ *   @paddingInfo    : padding information
+ *   @stream_cb      : stream data notify callback
+ *   @userdata       : user data ptr
+ *   @bDynAllocBuf   : flag indicating whether allocating buffers in 2 steps is allowed
+ *   @online_rotation: rotation applied online
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
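+// Note on buffer ownership (a reading of the failure paths below, not a
+// documented contract): when the stream count limit is hit or the stream
+// object cannot be allocated, addStream() deallocates and deletes the passed
+// streamInfoBuf itself, so callers should treat it as consumed on those errors.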
+int32_t QCameraChannel::addStream(QCameraAllocator &allocator,
+        QCameraHeapMemory *streamInfoBuf, QCameraHeapMemory *miscBuf,
+        uint8_t minStreamBufNum, cam_padding_info_t *paddingInfo,
+        stream_cb_routine stream_cb, void *userdata, bool bDynAllocBuf,
+        bool bDeffAlloc, cam_rotation_t online_rotation)
+{
+    int32_t rc = NO_ERROR;
+    if (mStreams.size() >= MAX_STREAM_NUM_IN_BUNDLE) {
+        ALOGE("%s: stream number (%zu) exceeds max limit (%d)",
+              __func__, mStreams.size(), MAX_STREAM_NUM_IN_BUNDLE);
+        if (streamInfoBuf != NULL) {
+            streamInfoBuf->deallocate();
+            delete streamInfoBuf;
+            streamInfoBuf = NULL;
+        }
+        return BAD_VALUE;
+    }
+    QCameraStream *pStream = new QCameraStream(allocator,
+            m_camHandle, m_handle, m_camOps, paddingInfo, bDeffAlloc,
+            online_rotation);
+    if (pStream == NULL) {
+        ALOGE("%s: No mem for Stream", __func__);
+        if (streamInfoBuf != NULL) {
+            streamInfoBuf->deallocate();
+            delete streamInfoBuf;
+            streamInfoBuf = NULL;
+        }
+        return NO_MEMORY;
+    }
+
+    rc = pStream->init(streamInfoBuf, miscBuf, minStreamBufNum,
+                       stream_cb, userdata, bDynAllocBuf);
+    if (rc == 0) {
+        mStreams.add(pStream);
+    } else {
+        delete pStream;
+    }
+    return rc;
+}
+/*===========================================================================
+ * FUNCTION   : config
+ *
+ * DESCRIPTION: Configure any deferred channel streams
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraChannel::config()
+{
+    int32_t rc = NO_ERROR;
+    for (size_t i = 0; i < mStreams.size(); ++i) {
+        if ( mStreams[i]->isDeffered() ) {
+            rc = mStreams[i]->configStream();
+            if (rc != NO_ERROR) {
+                break;
+            }
+        }
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : linkStream
+ *
+ * DESCRIPTION: link a stream into channel
+ *
+ * PARAMETERS :
+ *   @ch      : Channel which the stream belongs to
+ *   @stream  : Stream which needs to be linked
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraChannel::linkStream(QCameraChannel *ch, QCameraStream *stream)
+{
+    int32_t rc = NO_ERROR;
+
+    if ((0 == m_handle) || (NULL == ch) || (NULL == stream)) {
+        return NO_INIT;
+    }
+
+    int32_t handle = m_camOps->link_stream(m_camHandle,
+            ch->getMyHandle(),
+            stream->getMyHandle(),
+            m_handle);
+    if (0 == handle) {
+        ALOGE("%s : Linking of stream failed", __func__);
+        rc = INVALID_OPERATION;
+    } else {
+        mStreams.add(stream);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : start
+ *
+ * DESCRIPTION: start channel, which will start all streams belonging to this channel
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
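+// The body below follows this order (summarised from the code, for
+// readability): when more than one stream is bundled, query get_bundle_info()
+// and push CAM_STREAM_PARAM_TYPE_SET_BUNDLE_INFO to every bundled stream except
+// metadata, then start each stream owned by this channel, then start_channel();
+// if start_channel() fails, the streams started above are stopped again.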
+int32_t QCameraChannel::start()
+{
+    int32_t rc = NO_ERROR;
+
+    if (mStreams.size() > 1) {
+        // there is more than one stream in the channel
+        // we need to notify mctl that all streams in this channel need to be bundled
+        cam_bundle_config_t bundleInfo;
+        memset(&bundleInfo, 0, sizeof(bundleInfo));
+        rc = m_camOps->get_bundle_info(m_camHandle, m_handle, &bundleInfo);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: get_bundle_info failed", __func__);
+            return rc;
+        }
+        if (bundleInfo.num_of_streams > 1) {
+            for (int i = 0; i < bundleInfo.num_of_streams; i++) {
+                QCameraStream *pStream = getStreamByServerID(bundleInfo.stream_ids[i]);
+                if (pStream != NULL) {
+                    if (pStream->isTypeOf(CAM_STREAM_TYPE_METADATA)) {
+                        // Skip metadata for reprocess now because the PP module cannot handle meta data
+                        // May need further discussion if Imaginglib needs meta data
+                        continue;
+                    }
+
+                    cam_stream_parm_buffer_t param;
+                    memset(&param, 0, sizeof(cam_stream_parm_buffer_t));
+                    param.type = CAM_STREAM_PARAM_TYPE_SET_BUNDLE_INFO;
+                    param.bundleInfo = bundleInfo;
+                    rc = pStream->setParameter(param);
+                    if (rc != NO_ERROR) {
+                        ALOGE("%s: stream setParameter for set bundle failed", __func__);
+                        return rc;
+                    }
+                }
+            }
+        }
+    }
+
+    for (size_t i = 0; i < mStreams.size(); i++) {
+        if ((mStreams[i] != NULL) &&
+                (m_handle == mStreams[i]->getChannelHandle())) {
+            mStreams[i]->start();
+        }
+    }
+    rc = m_camOps->start_channel(m_camHandle, m_handle);
+
+    if (rc != NO_ERROR) {
+        for (size_t i = 0; i < mStreams.size(); i++) {
+            if ((mStreams[i] != NULL) &&
+                    (m_handle == mStreams[i]->getChannelHandle())) {
+                mStreams[i]->stop();
+            }
+        }
+    } else {
+        m_bIsActive = true;
+        for (size_t i = 0; i < mStreams.size(); i++) {
+            if (mStreams[i] != NULL) {
+                mStreams[i]->cond_signal();
+            }
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : stop
+ *
+ * DESCRIPTION: stop a channel, which will stop all streams belonging to this channel
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraChannel::stop()
+{
+    int32_t rc = NO_ERROR;
+    ssize_t linkedIdx = -1;
+
+    if (!m_bIsActive) {
+        return NO_INIT;
+    }
+
+    for (size_t i = 0; i < mStreams.size(); i++) {
+        if (mStreams[i] != NULL) {
+               if (m_handle == mStreams[i]->getChannelHandle()) {
+                   mStreams[i]->stop();
+               } else {
+                   // Remove linked stream from stream list
+                   linkedIdx = (ssize_t)i;
+               }
+        }
+    }
+    if (linkedIdx > 0) {
+        mStreams.removeAt((size_t)linkedIdx);
+    }
+
+    rc = m_camOps->stop_channel(m_camHandle, m_handle);
+
+    m_bIsActive = false;
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : bufDone
+ *
+ * DESCRIPTION: return a stream buf back to kernel
+ *
+ * PARAMETERS :
+ *   @recvd_frame  : stream buf frame to be returned
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraChannel::bufDone(mm_camera_super_buf_t *recvd_frame)
+{
+    int32_t rc = NO_ERROR;
+    for (uint32_t i = 0; i < recvd_frame->num_bufs; i++) {
+        if (recvd_frame->bufs[i] != NULL) {
+            for (size_t j = 0; j < mStreams.size(); j++) {
+                if (mStreams[j] != NULL &&
+                        mStreams[j]->getMyHandle() == recvd_frame->bufs[i]->stream_id) {
+                    rc = mStreams[j]->bufDone(recvd_frame->bufs[i]->buf_idx);
+                    break; // break loop j
+                }
+            }
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : processZoomDone
+ *
+ * DESCRIPTION: process zoom done event
+ *
+ * PARAMETERS :
+ *   @previewWindow : ptr to preview window ops table, needed to set preview
+ *                    crop information
+ *   @crop_info     : crop info as a result of zoom operation
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraChannel::processZoomDone(preview_stream_ops_t *previewWindow,
+                                        cam_crop_data_t &crop_info)
+{
+    int32_t rc = NO_ERROR;
+    for (size_t i = 0; i < mStreams.size(); i++) {
+        if ((mStreams[i] != NULL) &&
+                (m_handle == mStreams[i]->getChannelHandle())) {
+            rc = mStreams[i]->processZoomDone(previewWindow, crop_info);
+        }
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : getStreamByHandle
+ *
+ * DESCRIPTION: return stream object by stream handle
+ *
+ * PARAMETERS :
+ *   @streamHandle : stream handle
+ *
+ * RETURN     : stream object. NULL if not found
+ *==========================================================================*/
+QCameraStream *QCameraChannel::getStreamByHandle(uint32_t streamHandle)
+{
+    for (size_t i = 0; i < mStreams.size(); i++) {
+        if (mStreams[i] != NULL && mStreams[i]->getMyHandle() == streamHandle) {
+            return mStreams[i];
+        }
+    }
+    return NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : getStreamByServerID
+ *
+ * DESCRIPTION: return stream object by stream server ID from daemon
+ *
+ * PARAMETERS :
+ *   @serverID : stream server ID
+ *
+ * RETURN     : stream object. NULL if not found
+ *==========================================================================*/
+QCameraStream *QCameraChannel::getStreamByServerID(uint32_t serverID)
+{
+    for (size_t i = 0; i < mStreams.size(); i++) {
+        if (mStreams[i] != NULL && mStreams[i]->getMyServerID() == serverID) {
+            return mStreams[i];
+        }
+    }
+    return NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : getStreamByIndex
+ *
+ * DESCRIPTION: return stream object by index of streams in the channel
+ *
+ * PARAMETERS :
+ *   @index : index of stream in the channel
+ *
+ * RETURN     : stream object. NULL if not found
+ *==========================================================================*/
+QCameraStream *QCameraChannel::getStreamByIndex(uint32_t index)
+{
+    if (index >= MAX_STREAM_NUM_IN_BUNDLE) {
+        return NULL;
+    }
+
+    if (index < mStreams.size()) {
+        return mStreams[index];
+    }
+    return NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : UpdateStreamBasedParameters
+ *
+ * DESCRIPTION: update any stream based settings from parameters
+ *
+ * PARAMETERS :
+ *   @param   : reference to parameters object
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraChannel::UpdateStreamBasedParameters(QCameraParameters &param)
+{
+    int32_t rc = NO_ERROR;
+    if (param.isPreviewFlipChanged()) {
+        // try to find preview stream
+        for (size_t i = 0; i < mStreams.size(); i++) {
+            if ((mStreams[i] != NULL) &&
+                    (m_handle == mStreams[i]->getChannelHandle()) &&
+                    (mStreams[i]->isTypeOf(CAM_STREAM_TYPE_PREVIEW) ||
+                    (mStreams[i]->isOrignalTypeOf(CAM_STREAM_TYPE_PREVIEW))) ) {
+                cam_stream_parm_buffer_t param_buf;
+                memset(&param_buf, 0, sizeof(cam_stream_parm_buffer_t));
+                param_buf.type = CAM_STREAM_PARAM_TYPE_SET_FLIP;
+                param_buf.flipInfo.flip_mask =
+                        (uint32_t)param.getFlipMode(CAM_STREAM_TYPE_PREVIEW);
+                rc = mStreams[i]->setParameter(param_buf);
+                if (rc != NO_ERROR) {
+                    ALOGE("%s: set preview stream flip failed", __func__);
+                }
+            }
+        }
+    }
+    if (param.isVideoFlipChanged()) {
+        // try to find video stream
+        for (size_t i = 0; i < mStreams.size(); i++) {
+            if ((mStreams[i] != NULL) &&
+                    (m_handle == mStreams[i]->getChannelHandle()) &&
+                    (mStreams[i]->isTypeOf(CAM_STREAM_TYPE_VIDEO) ||
+                    (mStreams[i]->isOrignalTypeOf(CAM_STREAM_TYPE_VIDEO))) ) {
+                cam_stream_parm_buffer_t param_buf;
+                memset(&param_buf, 0, sizeof(cam_stream_parm_buffer_t));
+                param_buf.type = CAM_STREAM_PARAM_TYPE_SET_FLIP;
+                param_buf.flipInfo.flip_mask =
+                        (uint32_t)param.getFlipMode(CAM_STREAM_TYPE_VIDEO);
+                rc = mStreams[i]->setParameter(param_buf);
+                if (rc != NO_ERROR) {
+                    ALOGE("%s: set video stream flip failed", __func__);
+                }
+            }
+        }
+    }
+    if (param.isSnapshotFlipChanged()) {
+        // try to find snapshot/postview stream
+        for (size_t i = 0; i < mStreams.size(); i++) {
+            if (mStreams[i] != NULL &&
+                    (m_handle == mStreams[i]->getChannelHandle()) &&
+                    (mStreams[i]->isTypeOf(CAM_STREAM_TYPE_SNAPSHOT) ||
+                     mStreams[i]->isOrignalTypeOf(CAM_STREAM_TYPE_SNAPSHOT) ||
+                     mStreams[i]->isTypeOf(CAM_STREAM_TYPE_POSTVIEW) ||
+                 mStreams[i]->isOrignalTypeOf(CAM_STREAM_TYPE_POSTVIEW) ) ) {
+                cam_stream_parm_buffer_t param_buf;
+                memset(&param_buf, 0, sizeof(cam_stream_parm_buffer_t));
+                param_buf.type = CAM_STREAM_PARAM_TYPE_SET_FLIP;
+                param_buf.flipInfo.flip_mask =
+                        (uint32_t)param.getFlipMode(CAM_STREAM_TYPE_SNAPSHOT);
+                rc = mStreams[i]->setParameter(param_buf);
+                if (rc != NO_ERROR) {
+                    ALOGE("%s: set snapshot stream flip failed", __func__);
+                }
+            }
+        }
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : QCameraPicChannel
+ *
+ * DESCRIPTION: constructor of QCameraPicChannel
+ *
+ * PARAMETERS :
+ *   @cam_handle : camera handle
+ *   @cam_ops    : ptr to camera ops table
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraPicChannel::QCameraPicChannel(uint32_t cam_handle,
+                                     mm_camera_ops_t *cam_ops) :
+    QCameraChannel(cam_handle, cam_ops)
+{
+    m_bAllowDynBufAlloc = true;
+}
+
+/*===========================================================================
+ * FUNCTION   : QCameraPicChannel
+ *
+ * DESCRIPTION: default constructor of QCameraPicChannel
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraPicChannel::QCameraPicChannel()
+{
+    m_bAllowDynBufAlloc = true;
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCameraPicChannel
+ *
+ * DESCRIPTION: destructor of QCameraPicChannel
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraPicChannel::~QCameraPicChannel()
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : takePicture
+ *
+ * DESCRIPTION: send request for queued snapshot frames
+ *
+ * PARAMETERS :
+ *   @num_of_snapshot : number of snapshot frames requested
+ *   @num_of_retro_snapshot : number of retro snapshot frames requested
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraPicChannel::takePicture (
+    uint8_t num_of_snapshot,
+    uint8_t num_of_retro_snapshot)
+{
+    int32_t rc = m_camOps->request_super_buf(m_camHandle,
+                                             m_handle,
+                                             num_of_snapshot,
+                                             num_of_retro_snapshot);
+    return rc;
+}
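+
+// takePicture() and cancelPicture() are thin wrappers over the mm-camera
+// request_super_buf()/cancel_super_buf_request() ops; the channel keeps no
+// snapshot bookkeeping of its own here, so pairing the two calls is left to
+// the caller (a reading of the wrappers, not an extra contract).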
+
+/*===========================================================================
+ * FUNCTION   : cancelPicture
+ *
+ * DESCRIPTION: cancel request for queued snapshot frames
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraPicChannel::cancelPicture()
+{
+    int32_t rc = m_camOps->cancel_super_buf_request(m_camHandle, m_handle);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : stopAdvancedCapture
+ *
+ * DESCRIPTION: stop advanced capture based on advanced capture type.
+ *
+ * PARAMETERS :
+ *   @type : advanced capture type.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraPicChannel::stopAdvancedCapture(mm_camera_advanced_capture_t type)
+{
+    int32_t rc = m_camOps->process_advanced_capture(m_camHandle,
+            m_handle, type, 0, NULL);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : startAdvancedCapture
+ *
+ * DESCRIPTION: start advanced capture based on advanced capture type.
+ *
+ * PARAMETERS :
+ *   @type : advanced capture type.
+ *   @config : advanced capture config
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraPicChannel::startAdvancedCapture(mm_camera_advanced_capture_t type,
+        cam_capture_frame_config_t *config)
+{
+    int32_t rc = NO_ERROR;
+
+    rc = m_camOps->process_advanced_capture(m_camHandle, m_handle, type,
+            1, config);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : flushSuperbuffer
+ *
+ * DESCRIPTION: flush all superbuffer frames.
+ *
+ * PARAMETERS :
+ *   @frame_idx : frame index of focused frame
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraPicChannel::flushSuperbuffer(uint32_t frame_idx)
+{
+    int32_t rc = m_camOps->flush_super_buf_queue(m_camHandle, m_handle, frame_idx);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : QCameraVideoChannel
+ *
+ * DESCRIPTION: constructor of QCameraVideoChannel
+ *
+ * PARAMETERS :
+ *   @cam_handle : camera handle
+ *   @cam_ops    : ptr to camera ops table
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraVideoChannel::QCameraVideoChannel(uint32_t cam_handle,
+                                         mm_camera_ops_t *cam_ops) :
+    QCameraChannel(cam_handle, cam_ops)
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : QCameraVideoChannel
+ *
+ * DESCRIPTION: default constructor of QCameraVideoChannel
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraVideoChannel::QCameraVideoChannel()
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCameraVideoChannel
+ *
+ * DESCRIPTION: destructor of QCameraVideoChannel
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraVideoChannel::~QCameraVideoChannel()
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseFrame
+ *
+ * DESCRIPTION: return video frame from app
+ *
+ * PARAMETERS :
+ *   @opaque     : ptr to video frame to be returned
+ *   @isMetaData : if frame is a metadata or real frame
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraVideoChannel::releaseFrame(const void * opaque, bool isMetaData)
+{
+    QCameraStream *pVideoStream = NULL;
+    for (size_t i = 0; i < mStreams.size(); i++) {
+        if (mStreams[i] != NULL && mStreams[i]->isTypeOf(CAM_STREAM_TYPE_VIDEO)) {
+            pVideoStream = mStreams[i];
+            break;
+        }
+    }
+
+    if (NULL == pVideoStream) {
+        ALOGE("%s: No video stream in the channel", __func__);
+        return BAD_VALUE;
+    }
+
+    int32_t rc = pVideoStream->bufDone(opaque, isMetaData);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : QCameraReprocessChannel
+ *
+ * DESCRIPTION: constructor of QCameraReprocessChannel
+ *
+ * PARAMETERS :
+ *   @cam_handle : camera handle
+ *   @cam_ops    : ptr to camera ops table
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraReprocessChannel::QCameraReprocessChannel(uint32_t cam_handle,
+                                                 mm_camera_ops_t *cam_ops) :
+    QCameraChannel(cam_handle, cam_ops),
+    m_pSrcChannel(NULL)
+{
+    memset(mSrcStreamHandles, 0, sizeof(mSrcStreamHandles));
+}
+
+/*===========================================================================
+ * FUNCTION   : QCameraReprocessChannel
+ *
+ * DESCRIPTION: default constructor of QCameraReprocessChannel
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraReprocessChannel::QCameraReprocessChannel() :
+    m_pSrcChannel(NULL)
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCameraReprocessChannel
+ *
+ * DESCRIPTION: destructor of QCameraReprocessChannel
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraReprocessChannel::~QCameraReprocessChannel()
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : addReprocStreamsFromSource
+ *
+ * DESCRIPTION: add reprocess streams from input source channel
+ *
+ * PARAMETERS :
+ *   @allocator      : stream related buffer allocator
+ *   @featureConfig  : pp feature configuration
+ *   @pSrcChannel    : ptr to input source channel that needs reprocess
+ *   @minStreamBufNum: number of stream buffers needed
+ *   @burstNum       : number of burst captures needed
+ *   @paddingInfo    : padding information
+ *   @param          : reference to parameters
+ *   @contStream     : continuous streaming mode or burst
+ *   @offline        : configure for offline reprocessing
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
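+// Stream selection sketch (summarising the checks in the loop below): in
+// offline-RAW mode only the RAW source stream is reprocessed; otherwise RAW,
+// metadata and analysis streams are skipped, as are preview and postview.
+// Thumbnail-related (original preview/postview) streams are additionally
+// dropped when only HDR is enabled, when thumbnail reprocess is not needed,
+// or when no feature besides 2D denoise would apply to them.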
+int32_t QCameraReprocessChannel::addReprocStreamsFromSource(
+        QCameraAllocator& allocator, cam_pp_feature_config_t &featureConfig,
+        QCameraChannel *pSrcChannel, uint8_t minStreamBufNum, uint8_t burstNum,
+        cam_padding_info_t *paddingInfo, QCameraParameters &param, bool contStream,
+        bool offline)
+{
+    int32_t rc = 0;
+    QCameraStream *pStream = NULL;
+    QCameraHeapMemory *pStreamInfoBuf = NULL;
+    QCameraHeapMemory *pMiscBuf = NULL;
+    cam_stream_info_t *streamInfo = NULL;
+    cam_padding_info_t padding;
+
+    memset(mSrcStreamHandles, 0, sizeof(mSrcStreamHandles));
+    if (NULL == paddingInfo) {
+        return BAD_VALUE;
+    }
+    padding = *paddingInfo;
+    //Use maximum padding so that the buffer
+    //can be rotated
+    padding.width_padding = MAX(padding.width_padding, padding.height_padding);
+    padding.height_padding = padding.width_padding;
+
+    CDBG("%s : %d: num of src stream = %d", __func__, __LINE__, pSrcChannel->getNumOfStreams());
+
+    for (uint32_t i = 0; i < pSrcChannel->getNumOfStreams(); i++) {
+        pStream = pSrcChannel->getStreamByIndex(i);
+        if (pStream != NULL) {
+            if (param.getofflineRAW() && !pStream->isTypeOf(CAM_STREAM_TYPE_RAW)) {
+                // Skip all streams other than RAW in case of offline RAW reprocessing
+                continue;
+            }
+            if (pStream->isTypeOf(CAM_STREAM_TYPE_RAW) && !param.getofflineRAW()) {
+                // Skip RAW for reprocess now because the PP module cannot handle
+                // meta data & RAW. May need further discussion if Imaginglib needs meta data
+                continue;
+            }
+
+            if ((pStream->isTypeOf(CAM_STREAM_TYPE_METADATA)) ||
+                    (pStream->isTypeOf(CAM_STREAM_TYPE_ANALYSIS))) {
+                // Skip metadata
+                continue;
+            }
+
+            if (pStream->isTypeOf(CAM_STREAM_TYPE_POSTVIEW) ||
+                pStream->isTypeOf(CAM_STREAM_TYPE_PREVIEW)) {
+                // Skip postview: in the non-ZSL case we don't want to send
+                // the thumbnail through reprocess.
+                // Skip preview: for the same reason in the ZSL case
+                continue;
+            }
+
+            if (pStream->isTypeOf(CAM_STREAM_TYPE_PREVIEW) ||
+                    pStream->isTypeOf(CAM_STREAM_TYPE_POSTVIEW) ||
+                    pStream->isOrignalTypeOf(CAM_STREAM_TYPE_PREVIEW) ||
+                    pStream->isOrignalTypeOf(CAM_STREAM_TYPE_POSTVIEW) ||
+                    (param.getofflineRAW() && pStream->isTypeOf(CAM_STREAM_TYPE_RAW))) {
+                uint32_t feature_mask = featureConfig.feature_mask;
+
+                if ((feature_mask & ~CAM_QCOM_FEATURE_HDR) == 0
+                        && param.isHDREnabled()
+                        && !param.isHDRThumbnailProcessNeeded()) {
+
+                    // Skip thumbnail stream reprocessing in HDR
+                    // if only hdr is enabled
+                    continue;
+                }
+
+                // skip thumbnail reprocessing if not needed
+                if (!param.needThumbnailReprocess(&feature_mask)) {
+                    continue;
+                }
+
+                //Don't do WNR for thumbnail
+                feature_mask &= ~CAM_QCOM_FEATURE_DENOISE2D;
+                if (!feature_mask) {
+                    // Skip thumbnail stream reprocessing since no other
+                    // reprocessing is enabled.
+                    continue;
+                }
+            }
+
+            pStreamInfoBuf = allocator.allocateStreamInfoBuf(CAM_STREAM_TYPE_OFFLINE_PROC);
+            if (pStreamInfoBuf == NULL) {
+                ALOGE("%s: no mem for stream info buf", __func__);
+                rc = NO_MEMORY;
+                break;
+            }
+
+            streamInfo = (cam_stream_info_t *)pStreamInfoBuf->getPtr(0);
+            memset(streamInfo, 0, sizeof(cam_stream_info_t));
+            streamInfo->stream_type = CAM_STREAM_TYPE_OFFLINE_PROC;
+            // Enable CPP high performance mode to put it in turbo frequency mode for
+            // burst/longshot/HDR snapshot cases
+            streamInfo->perf_mode = CAM_PERF_HIGH_PERFORMANCE;
+            if (param.getofflineRAW() && pStream->isTypeOf(CAM_STREAM_TYPE_RAW)) {
+                streamInfo->fmt = CAM_FORMAT_YUV_420_NV21;
+            } else {
+                rc = pStream->getFormat(streamInfo->fmt);
+            }
+            rc = pStream->getFrameDimension(streamInfo->dim);
+            if ( contStream ) {
+                streamInfo->streaming_mode = CAM_STREAMING_MODE_CONTINUOUS;
+                streamInfo->num_of_burst = 0;
+            } else {
+                streamInfo->streaming_mode = CAM_STREAMING_MODE_BURST;
+                streamInfo->num_of_burst = burstNum;
+            }
+
+            cam_stream_reproc_config_t rp_cfg;
+            memset(&rp_cfg, 0, sizeof(cam_stream_reproc_config_t));
+            if (offline) {
+                cam_frame_len_offset_t offset;
+                memset(&offset, 0, sizeof(cam_frame_len_offset_t));
+
+                rp_cfg.pp_type = CAM_OFFLINE_REPROCESS_TYPE;
+                pStream->getFormat(rp_cfg.offline.input_fmt);
+                pStream->getFrameDimension(rp_cfg.offline.input_dim);
+                pStream->getFrameOffset(offset);
+                rp_cfg.offline.input_buf_planes.plane_info = offset;
+                rp_cfg.offline.input_type = pStream->getMyOriginalType();
+                //For input metadata + input buffer
+                rp_cfg.offline.num_of_bufs = 2;
+            } else {
+                rp_cfg.pp_type = CAM_ONLINE_REPROCESS_TYPE;
+                rp_cfg.online.input_stream_id = pStream->getMyServerID();
+                rp_cfg.online.input_stream_type = pStream->getMyOriginalType();
+            }
+            param.getStreamRotation(streamInfo->stream_type,
+                    streamInfo->pp_config, streamInfo->dim);
+            streamInfo->reprocess_config = rp_cfg;
+            streamInfo->reprocess_config.pp_feature_config = featureConfig;
+
+            if (!(pStream->isTypeOf(CAM_STREAM_TYPE_SNAPSHOT) ||
+                pStream->isOrignalTypeOf(CAM_STREAM_TYPE_SNAPSHOT))) {
+                streamInfo->reprocess_config.pp_feature_config.feature_mask &= ~CAM_QCOM_FEATURE_CAC;
+                //Don't do WNR for thumbnail
+                streamInfo->reprocess_config.pp_feature_config.feature_mask &= ~CAM_QCOM_FEATURE_DENOISE2D;
+
+                if (param.isHDREnabled()
+                  && !param.isHDRThumbnailProcessNeeded()){
+                    streamInfo->reprocess_config.pp_feature_config.feature_mask
+                      &= ~CAM_QCOM_FEATURE_HDR;
+                }
+            }
+
+            if (streamInfo->reprocess_config.online.input_stream_type == CAM_STREAM_TYPE_SNAPSHOT) {
+                // Reprocess can be for both zsl and non-zsl cases
+                int flipMode =
+                    param.getFlipMode(streamInfo->reprocess_config.online.input_stream_type);
+                if (flipMode > 0) {
+                    streamInfo->reprocess_config.pp_feature_config.feature_mask |=
+                            CAM_QCOM_FEATURE_FLIP;
+                    streamInfo->reprocess_config.pp_feature_config.flip = (uint32_t)flipMode;
+                }
+            }
+
+            if (streamInfo->reprocess_config.offline.input_type == CAM_STREAM_TYPE_SNAPSHOT) {
+                int flipMode =
+                        param.getFlipMode(streamInfo->reprocess_config.offline.input_type);
+                if (flipMode > 0) {
+                    streamInfo->reprocess_config.pp_feature_config.feature_mask |=
+                            CAM_QCOM_FEATURE_FLIP;
+                    streamInfo->reprocess_config.pp_feature_config.flip = (uint32_t)flipMode;
+                }
+            }
+
+            if ((streamInfo->reprocess_config.pp_feature_config.feature_mask
+                    & CAM_QCOM_FEATURE_SCALE)
+                    && param.m_reprocScaleParam.isScaleEnabled()
+                    && param.m_reprocScaleParam.isUnderScaling()) {
+                // We only scale the snapshot frame
+                if (pStream->isTypeOf(CAM_STREAM_TYPE_SNAPSHOT)) {
+                    streamInfo->dim.width =
+                            streamInfo->reprocess_config.pp_feature_config.scale_param.output_width;
+                    streamInfo->dim.height =
+                            streamInfo->reprocess_config.pp_feature_config.scale_param.output_height;
+                }
+                CDBG_HIGH("%s: stream width=%d, height=%d.",
+                        __func__, streamInfo->dim.width, streamInfo->dim.height);
+            }
+
+            // save source stream handle
+            mSrcStreamHandles[mStreams.size()] = pStream->getMyHandle();
+
+            pMiscBuf = allocator.allocateMiscBuf(streamInfo);
+
+            // add reprocess stream
+            rc = addStream(allocator, pStreamInfoBuf, pMiscBuf,
+                    minStreamBufNum, &padding, NULL, NULL, false, false,
+                    streamInfo->reprocess_config.pp_feature_config.rotation);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: add reprocess stream failed, ret = %d", __func__, rc);
+                break;
+            }
+        }
+    }
+
+    if (rc == NO_ERROR) {
+        m_pSrcChannel = pSrcChannel;
+    }
+    return rc;
+}
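+
+// Illustrative usage (a sketch only; names such as pReprocChannel, pZSLChannel,
+// ppConfig and mParameters are assumptions, not taken from this file): a
+// post-processing module would typically mirror the source channel's capture
+// streams into this reprocess channel and then start it:
+//
+//   rc = pReprocChannel->addReprocStreamsFromSource(allocator, ppConfig,
+//           pZSLChannel, minCaptureBuffers, burstCount, &paddingInfo,
+//           mParameters, /*contStream*/ false, /*offline*/ false);
+//   if (rc == NO_ERROR) {
+//       rc = pReprocChannel->start();
+//   }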
+
+/*===========================================================================
+ * FUNCTION   : getStreamBySrouceHandle
+ *
+ * DESCRIPTION: find reprocess stream by its source stream handle
+ *
+ * PARAMETERS :
+ *   @srcHandle : source stream handle
+ *
+ * RETURN     : ptr to reprocess stream if found. NULL if not found
+ *==========================================================================*/
+QCameraStream * QCameraReprocessChannel::getStreamBySrouceHandle(uint32_t srcHandle)
+{
+    QCameraStream *pStream = NULL;
+
+    for (size_t i = 0; i < mStreams.size(); i++) {
+        if (mSrcStreamHandles[i] == srcHandle) {
+            pStream = mStreams[i];
+            break;
+        }
+    }
+
+    return pStream;
+}
+
+/*===========================================================================
+ * FUNCTION   : stop
+ *
+ * DESCRIPTION: Unmap offline buffers and stop channel
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraReprocessChannel::stop()
+{
+    if (!mOfflineBuffers.empty()) {
+        QCameraStream *stream = NULL;
+        List<OfflineBuffer>::iterator it = mOfflineBuffers.begin();
+        int error = NO_ERROR;
+        for( ; it != mOfflineBuffers.end(); it++) {
+            stream = (*it).stream;
+            if (NULL != stream) {
+                error = stream->unmapBuf((*it).type,
+                                         (*it).index,
+                                         -1);
+                if (NO_ERROR != error) {
+                    ALOGE("%s: Error during offline buffer unmap %d",
+                          __func__, error);
+                }
+            }
+        }
+        mOfflineBuffers.clear();
+    }
+
+    return QCameraChannel::stop();
+}
+
+/*===========================================================================
+ * FUNCTION   : doReprocessOffline
+ *
+ * DESCRIPTION: request to do offline reprocess on the frame
+ *
+ * PARAMETERS :
+ *   @frame   : frame on which reprocess is to be performed
+ *   @meta_buf : Metadata buffer for reprocessing
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraReprocessChannel::doReprocessOffline(mm_camera_super_buf_t *frame,
+        mm_camera_buf_def_t *meta_buf)
+{
+    int32_t rc = 0;
+    OfflineBuffer mappedBuffer;
+    QCameraStream *pStream = NULL;
+
+    if (mStreams.size() < 1) {
+        ALOGE("%s: No reprocess streams", __func__);
+        return -1;
+    }
+    if (m_pSrcChannel == NULL) {
+        ALOGE("%s: No source channel for reprocess", __func__);
+        return -1;
+    }
+
+    if (frame == NULL) {
+        ALOGE("%s: Invalid source frame", __func__);
+        return BAD_VALUE;
+    }
+
+    for (uint32_t i = 0; i < frame->num_bufs; i++) {
+        pStream = getStreamBySrouceHandle(frame->bufs[i]->stream_id);
+        if ((pStream != NULL) &&
+                (m_handle == pStream->getChannelHandle())) {
+            if (pStream->isTypeOf(CAM_STREAM_TYPE_METADATA)) {
+                continue;
+            }
+
+            uint32_t meta_buf_index = 0;
+            if (NULL != meta_buf) {
+                rc = pStream->mapBuf(CAM_MAPPING_BUF_TYPE_OFFLINE_META_BUF,
+                                     meta_buf_index,
+                                     -1,
+                                     meta_buf->fd,
+                                     meta_buf->frame_len);
+                if (NO_ERROR != rc ) {
+                    ALOGE("%s : Error during metadata buffer mapping",
+                          __func__);
+                    break;
+                }
+                // metadata is sent together with the reprocess frame
+                uint32_t stream_id = frame->bufs[i]->stream_id;
+                QCameraStream *srcStream =
+                        m_pSrcChannel->getStreamByHandle(stream_id);
+                metadata_buffer_t *pMetaData =
+                        (metadata_buffer_t *)meta_buf->buffer;
+                if ((NULL != pMetaData) && (NULL != srcStream)) {
+                    IF_META_AVAILABLE(cam_crop_data_t, crop, CAM_INTF_META_CROP_DATA, pMetaData) {
+                        if (MAX_NUM_STREAMS > crop->num_of_streams) {
+                            for (int j = 0; j < MAX_NUM_STREAMS; j++) {
+                                if (crop->crop_info[j].stream_id ==
+                                            srcStream->getMyServerID()) {
+                                    // Store crop/roi information for offline reprocess
+                                    // in the reprocess stream slot
+                                    crop->crop_info[crop->num_of_streams].crop =
+                                            crop->crop_info[j].crop;
+                                    crop->crop_info[crop->num_of_streams].roi_map =
+                                            crop->crop_info[j].roi_map;
+                                    crop->crop_info[crop->num_of_streams].stream_id =
+                                            mStreams[0]->getMyServerID();
+                                    crop->num_of_streams++;
+
+                                    break;
+                                }
+                            }
+                        } else {
+                            ALOGE("%s: No space to add reprocess stream crop/roi information",
+                                    __func__);
+                        }
+                    }
+                }
+            }
+            mappedBuffer.index = meta_buf_index;
+            mappedBuffer.stream = pStream;
+            mappedBuffer.type = CAM_MAPPING_BUF_TYPE_OFFLINE_META_BUF;
+            mOfflineBuffers.push_back(mappedBuffer);
+
+            uint32_t buf_index = 1;
+            rc = pStream->mapBuf(CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF,
+                                 buf_index,
+                                 -1,
+                                 frame->bufs[i]->fd,
+                                 frame->bufs[i]->frame_len);
+            if (NO_ERROR != rc ) {
+                ALOGE("%s : Error during reprocess input buffer mapping",
+                      __func__);
+                break;
+            }
+            mappedBuffer.index = buf_index;
+            mappedBuffer.stream = pStream;
+            mappedBuffer.type = CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF;
+            mOfflineBuffers.push_back(mappedBuffer);
+
+            cam_stream_parm_buffer_t param;
+            memset(&param, 0, sizeof(cam_stream_parm_buffer_t));
+
+            param.type = CAM_STREAM_PARAM_TYPE_DO_REPROCESS;
+            param.reprocess.buf_index = buf_index;
+            param.reprocess.frame_idx = frame->bufs[i]->frame_idx;
+            param.reprocess.meta_present = 1;
+            param.reprocess.meta_buf_index = meta_buf_index;
+
+            rc = pStream->setParameter(param);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: stream setParameter for reprocess failed",
+                      __func__);
+                break;
+            }
+        }
+    }
+    return rc;
+}
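+
+// Note on the index convention above: the offline reprocess stream is configured
+// with rp_cfg.offline.num_of_bufs = 2 in addReprocStreamsFromSource(), so index 0
+// carries the mapped OFFLINE_META_BUF and index 1 the mapped OFFLINE_INPUT_BUF.
+// A hypothetical caller (names assumed) hands over the super buffer received from
+// the source channel together with its metadata buffer:
+//
+//   rc = pReprocChannel->doReprocessOffline(pSuperBuf, pMetaBuf);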
+
+/*===========================================================================
+ * FUNCTION   : doReprocess
+ *
+ * DESCRIPTION: request to do a reprocess on the frame
+ *
+ * PARAMETERS :
+ *   @frame   : frame on which reprocess is to be performed
+ *   @mParameter : camera parameters
+ *   @pMetaStream: Metadata stream handle
+ *   @meta_buf_index : Metadata buffer index
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraReprocessChannel::doReprocess(mm_camera_super_buf_t *frame,
+        QCameraParameters &mParameter, QCameraStream *pMetaStream,
+        uint8_t meta_buf_index)
+{
+    int32_t rc = 0;
+    if (mStreams.size() < 1) {
+        ALOGE("%s: No reprocess streams", __func__);
+        return -1;
+    }
+    if (m_pSrcChannel == NULL) {
+        ALOGE("%s: No source channel for reprocess", __func__);
+        return -1;
+    }
+
+    for (uint32_t i = 0; i < frame->num_bufs; i++) {
+        QCameraStream *pStream = getStreamBySrouceHandle(frame->bufs[i]->stream_id);
+        if ((pStream != NULL) && (m_handle == pStream->getChannelHandle())) {
+            if (mParameter.getofflineRAW() &&
+                    !pStream->isOrignalTypeOf(CAM_STREAM_TYPE_RAW)) {
+                continue;
+            }
+            if ((pStream->isTypeOf(CAM_STREAM_TYPE_METADATA)) ||
+                    (pStream->isTypeOf(CAM_STREAM_TYPE_ANALYSIS))) {
+                // Skip metadata for reprocess now because the PP module cannot handle metadata.
+                // May need further discussion if Imaginglib needs metadata
+                continue;
+            }
+
+            if (pStream->isTypeOf(CAM_STREAM_TYPE_POSTVIEW) ||
+                pStream->isTypeOf(CAM_STREAM_TYPE_PREVIEW)) {
+                // Skip postview: in the non-ZSL case we don't want to send
+                // the thumbnail through reprocess.
+                // Skip preview: for the same reason in the ZSL case
+                continue;
+            }
+
+            cam_stream_parm_buffer_t param;
+            memset(&param, 0, sizeof(cam_stream_parm_buffer_t));
+            param.type = CAM_STREAM_PARAM_TYPE_DO_REPROCESS;
+            param.reprocess.buf_index = frame->bufs[i]->buf_idx;
+            param.reprocess.frame_idx = frame->bufs[i]->frame_idx;
+            if (pMetaStream != NULL) {
+                // a metadata frame is bundled and sent together with the reprocess frame
+                param.reprocess.meta_present = 1;
+                param.reprocess.meta_stream_handle = pMetaStream->getMyServerID();
+                param.reprocess.meta_buf_index = meta_buf_index;
+            }
+
+            CDBG_HIGH("Frame for reprocessing id = %d buf Id = %d meta index = %d",
+                    param.reprocess.frame_idx, param.reprocess.buf_index,
+                    param.reprocess.meta_buf_index);
+
+            rc = pStream->setParameter(param);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: stream setParameter for reprocess failed", __func__);
+                break;
+            }
+        }
+    }
+    return rc;
+}
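+
+// Sketch of the bundled-metadata variant (variable names are assumptions): when
+// the metadata frame arrives on a separate stream, its server stream handle and
+// buffer index are passed along so the backend can pair them with the input frame:
+//
+//   rc = pReprocChannel->doReprocess(pSuperBuf, mParameters, pMetaStream,
+//           (uint8_t)pMetaFrame->buf_idx);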
+
+/*===========================================================================
+ * FUNCTION   : doReprocess
+ *
+ * DESCRIPTION: request to do a reprocess on the frame
+ *
+ * PARAMETERS :
+ *   @buf_fd     : fd to the input buffer that needs reprocess
+ *   @buf_length : length of the input buffer
+ *   @ret_val    : result of reprocess.
+ *                 e.g., could be the face ID when registering a face image.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraReprocessChannel::doReprocess(int buf_fd,
+        size_t buf_length, int32_t &ret_val)
+{
+    int32_t rc = 0;
+    if (mStreams.size() < 1) {
+        ALOGE("%s: No reprocess streams", __func__);
+        return -1;
+    }
+
+    uint32_t buf_idx = 0;
+    for (size_t i = 0; i < mStreams.size(); i++) {
+        if ((mStreams[i] != NULL) &&
+                (m_handle != mStreams[i]->getChannelHandle())) {
+            continue;
+        }
+        rc = mStreams[i]->mapBuf(CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF,
+                                 buf_idx, -1,
+                                 buf_fd, buf_length);
+
+        if (rc == NO_ERROR) {
+            cam_stream_parm_buffer_t param;
+            memset(&param, 0, sizeof(cam_stream_parm_buffer_t));
+            param.type = CAM_STREAM_PARAM_TYPE_DO_REPROCESS;
+            param.reprocess.buf_index = buf_idx;
+            rc = mStreams[i]->setParameter(param);
+            if (rc == NO_ERROR) {
+                ret_val = param.reprocess.ret_val;
+            }
+            mStreams[i]->unmapBuf(CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF,
+                                  buf_idx, -1);
+        }
+    }
+    return rc;
+}
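+
+// Sketch of the fd-based variant (buffer object name is an assumption): used for
+// single-buffer offline jobs such as registering a face image, where the result
+// is returned through ret_val:
+//
+//   int32_t faceId = -1;
+//   rc = pReprocChannel->doReprocess(imgBuf->getFd(0),
+//           (size_t)imgBuf->getSize(0), faceId);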
+
+}; // namespace qcamera
diff --git a/camera/QCamera2/HAL/QCameraChannel.h b/camera/QCamera2/HAL/QCameraChannel.h
new file mode 100644
index 0000000..b3d82d3
--- /dev/null
+++ b/camera/QCamera2/HAL/QCameraChannel.h
@@ -0,0 +1,160 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCAMERA_CHANNEL_H__
+#define __QCAMERA_CHANNEL_H__
+
+#include <hardware/camera.h>
+#include "QCameraStream.h"
+
+extern "C" {
+#include <mm_camera_interface.h>
+}
+
+namespace qcamera {
+
+class QCameraChannel
+{
+public:
+    QCameraChannel(uint32_t cam_handle,
+                   mm_camera_ops_t *cam_ops);
+    QCameraChannel();
+    virtual ~QCameraChannel();
+    virtual int32_t init(mm_camera_channel_attr_t *attr,
+                         mm_camera_buf_notify_t dataCB, // data CB for channel data
+                         void *userData);
+    // Ownership of the memory is transferred from the caller to the channel with this call.
+    virtual int32_t addStream(QCameraAllocator& allocator,
+            QCameraHeapMemory *streamInfoBuf, QCameraHeapMemory *miscBuf,
+            uint8_t minStreamBufnum, cam_padding_info_t *paddingInfo,
+            stream_cb_routine stream_cb, void *userdata, bool bDynAllocBuf,
+            bool bDeffAlloc = false, cam_rotation_t online_rotation = ROTATE_0);
+    virtual int32_t linkStream(QCameraChannel *ch, QCameraStream *stream);
+    virtual int32_t start();
+    virtual int32_t stop();
+    virtual int32_t bufDone(mm_camera_super_buf_t *recvd_frame);
+    virtual int32_t processZoomDone(preview_stream_ops_t *previewWindow,
+                                    cam_crop_data_t &crop_info);
+    int32_t config();
+    QCameraStream *getStreamByHandle(uint32_t streamHandle);
+    uint32_t getMyHandle() const {return m_handle;};
+    uint32_t getNumOfStreams() const {return (uint32_t) mStreams.size();};
+    QCameraStream *getStreamByIndex(uint32_t index);
+    QCameraStream *getStreamByServerID(uint32_t serverID);
+    int32_t UpdateStreamBasedParameters(QCameraParameters &param);
+    void deleteChannel();
+
+protected:
+    uint32_t m_camHandle;
+    mm_camera_ops_t *m_camOps;
+    bool m_bIsActive;
+    bool m_bAllowDynBufAlloc; // if buf allocation can be in two steps
+
+    uint32_t m_handle;
+    Vector<QCameraStream *> mStreams;
+    mm_camera_buf_notify_t mDataCB;
+    void *mUserData;
+};
+
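+// Typical channel lifecycle (illustrative sketch; callback and buffer names are
+// assumptions, not taken from this header):
+//
+//   QCameraChannel ch(camHandle, camOps);
+//   ch.init(&attr, channelDataNotifyCB, userData);
+//   ch.addStream(allocator, streamInfoBuf, miscBuf, minBufNum,
+//                &paddingInfo, streamCB, userData, /*bDynAllocBuf*/ false);
+//   ch.start();
+//   ...
+//   ch.stop();
+//   ch.deleteChannel();
+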
+// burst pic channel: i.e. zsl burst mode
+class QCameraPicChannel : public QCameraChannel
+{
+public:
+    QCameraPicChannel(uint32_t cam_handle,
+                      mm_camera_ops_t *cam_ops);
+    QCameraPicChannel();
+    virtual ~QCameraPicChannel();
+    int32_t takePicture(uint8_t num_of_snapshot, uint8_t num_of_retro_snapshot);
+    int32_t cancelPicture();
+    int32_t stopAdvancedCapture(mm_camera_advanced_capture_t type);
+    int32_t startAdvancedCapture(mm_camera_advanced_capture_t type,
+            cam_capture_frame_config_t *config = NULL);
+    int32_t flushSuperbuffer(uint32_t frame_idx);
+};
+
+// video channel class
+class QCameraVideoChannel : public QCameraChannel
+{
+public:
+    QCameraVideoChannel(uint32_t cam_handle,
+                        mm_camera_ops_t *cam_ops);
+    QCameraVideoChannel();
+    virtual ~QCameraVideoChannel();
+    int32_t releaseFrame(const void *opaque, bool isMetaData);
+};
+
+// reprocess channel class
+class QCameraReprocessChannel : public QCameraChannel
+{
+public:
+    QCameraReprocessChannel(uint32_t cam_handle,
+                            mm_camera_ops_t *cam_ops);
+    QCameraReprocessChannel();
+    virtual ~QCameraReprocessChannel();
+    int32_t addReprocStreamsFromSource(QCameraAllocator& allocator,
+                                       cam_pp_feature_config_t &config,
+                                       QCameraChannel *pSrcChannel,
+                                       uint8_t minStreamBufNum,
+                                       uint8_t burstNum,
+                                       cam_padding_info_t *paddingInfo,
+                                       QCameraParameters &param,
+                                       bool contStream,
+                                       bool offline);
+    // online reprocess
+    int32_t doReprocess(mm_camera_super_buf_t *frame,
+            QCameraParameters &param, QCameraStream *pMetaStream,
+            uint8_t meta_buf_index);
+
+    // offline reprocess
+    int32_t doReprocess(int buf_fd, size_t buf_length, int32_t &ret_val);
+
+    int32_t doReprocessOffline(mm_camera_super_buf_t *frame,
+            mm_camera_buf_def_t *meta_buf);
+
+    int32_t stop();
+    QCameraChannel *getSrcChannel(){return m_pSrcChannel;};
+
+private:
+    QCameraStream *getStreamBySrouceHandle(uint32_t srcHandle);
+
+    typedef struct {
+        QCameraStream *stream;
+        cam_mapping_buf_type type;
+        uint32_t index;
+    } OfflineBuffer;
+
+    uint32_t mSrcStreamHandles[MAX_STREAM_NUM_IN_BUNDLE];
+    QCameraChannel *m_pSrcChannel; // ptr to source channel for reprocess
+    android::List<OfflineBuffer> mOfflineBuffers;
+
+};
+
+}; // namespace qcamera
+
+#endif /* __QCAMERA_CHANNEL_H__ */
diff --git a/camera/QCamera2/HAL/QCameraMem.cpp b/camera/QCamera2/HAL/QCameraMem.cpp
new file mode 100644
index 0000000..47e1eff
--- /dev/null
+++ b/camera/QCamera2/HAL/QCameraMem.cpp
@@ -0,0 +1,1939 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+#define MEMLOG_THRESH 102400
+#define LOG_TAG "QCameraHWI_Mem"
+
+#include <string.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <utils/Errors.h>
+#include <utils/Trace.h>
+#include <utils/Log.h>
+#include <gralloc_priv.h>
+#include <QComOMXMetadata.h>
+#include "QCamera2HWI.h"
+#include "QCameraMem.h"
+#include "QCameraParameters.h"
+
+extern "C" {
+#include <mm_camera_interface.h>
+}
+
+using namespace android;
+
+namespace qcamera {
+
+// QCameraMemory base class
+
+/*===========================================================================
+ * FUNCTION   : QCameraMemory
+ *
+ * DESCRIPTION: default constructor of QCameraMemory
+ *
+ * PARAMETERS :
+ *   @cached  : flag indicates if using cached memory
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraMemory::QCameraMemory(bool cached,
+        QCameraMemoryPool *pool,
+        cam_stream_type_t streamType, cam_stream_buf_type bufType)
+    :m_bCached(cached),
+     mMemoryPool(pool),
+     mStreamType(streamType),
+     mBufType(bufType)
+{
+    mBufferCount = 0;
+    reset();
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCameraMemory
+ *
+ * DESCRIPTION: destructor of QCameraMemory
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraMemory::~QCameraMemory()
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : cacheOpsInternal
+ *
+ * DESCRIPTION: ion related memory cache operations
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *   @cmd     : cache ops command
+ *   @vaddr   : ptr to the virtual address
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraMemory::cacheOpsInternal(uint32_t index, unsigned int cmd, void *vaddr)
+{
+    if (!m_bCached) {
+        // Memory is not cached, no need for cache ops
+        CDBG("%s: No cache ops here for uncached memory", __func__);
+        return OK;
+    }
+
+    struct ion_flush_data cache_inv_data;
+    struct ion_custom_data custom_data;
+    int ret = OK;
+
+    if (index >= mBufferCount) {
+        ALOGE("%s: index %d out of bound [0, %d)", __func__, index, mBufferCount);
+        return BAD_INDEX;
+    }
+
+    memset(&cache_inv_data, 0, sizeof(cache_inv_data));
+    memset(&custom_data, 0, sizeof(custom_data));
+    cache_inv_data.vaddr = vaddr;
+    cache_inv_data.fd = mMemInfo[index].fd;
+    cache_inv_data.handle = mMemInfo[index].handle;
+    cache_inv_data.length =
+            ( /* FIXME: Should remove this after ION interface changes */ unsigned int)
+            mMemInfo[index].size;
+    custom_data.cmd = cmd;
+    custom_data.arg = (unsigned long)&cache_inv_data;
+
+    CDBG_HIGH("%s: addr = %p, fd = %d, handle = %lx length = %d, ION Fd = %d",
+         __func__, cache_inv_data.vaddr, cache_inv_data.fd,
+         (unsigned long)cache_inv_data.handle, cache_inv_data.length,
+         mMemInfo[index].main_ion_fd);
+    ret = ioctl(mMemInfo[index].main_ion_fd, ION_IOC_CUSTOM, &custom_data);
+    if (ret < 0)
+        ALOGE("%s: Cache Invalidate failed: %s\n", __func__, strerror(errno));
+
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : getFd
+ *
+ * DESCRIPTION: return file descriptor of the indexed buffer
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : file descriptor
+ *==========================================================================*/
+int QCameraMemory::getFd(uint32_t index) const
+{
+    if (index >= mBufferCount)
+        return BAD_INDEX;
+
+    return mMemInfo[index].fd;
+}
+
+/*===========================================================================
+ * FUNCTION   : getSize
+ *
+ * DESCRIPTION: return buffer size of the indexed buffer
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : buffer size
+ *==========================================================================*/
+ssize_t QCameraMemory::getSize(uint32_t index) const
+{
+    if (index >= mBufferCount)
+        return BAD_INDEX;
+
+    return (ssize_t)mMemInfo[index].size;
+}
+
+/*===========================================================================
+ * FUNCTION   : getCnt
+ *
+ * DESCRIPTION: query number of buffers allocated
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : number of buffers allocated
+ *==========================================================================*/
+uint8_t QCameraMemory::getCnt() const
+{
+    return mBufferCount;
+}
+
+/*===========================================================================
+ * FUNCTION   : reset
+ *
+ * DESCRIPTION: reset member variables
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraMemory::reset()
+{
+    size_t i, count;
+
+    memset(mMemInfo, 0, sizeof(mMemInfo));
+
+    count = sizeof(mMemInfo) / sizeof(mMemInfo[0]);
+    for (i = 0; i < count; i++) {
+        mMemInfo[i].fd = -1;
+        mMemInfo[i].main_ion_fd = -1;
+    }
+
+    return;
+}
+
+/*===========================================================================
+ * FUNCTION   : getBufDef
+ *
+ * DESCRIPTION: query detailed buffer information
+ *
+ * PARAMETERS :
+ *   @offset  : [input] frame buffer offset
+ *   @bufDef  : [output] reference to struct to store buffer definition
+ *   @index   : [input] index of the buffer
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraMemory::getBufDef(const cam_frame_len_offset_t &offset,
+        mm_camera_buf_def_t &bufDef, uint32_t index) const
+{
+    if (!mBufferCount) {
+        ALOGE("Memory not allocated");
+        return;
+    }
+    bufDef.fd = mMemInfo[index].fd;
+    bufDef.buf_type = CAM_STREAM_BUF_TYPE_MPLANE;
+    bufDef.frame_len = offset.frame_len;
+    bufDef.mem_info = (void *)this;
+    bufDef.planes_buf.num_planes = (int8_t)offset.num_planes;
+    bufDef.buffer = getPtr(index);
+    bufDef.buf_idx = index;
+
+    /* Plane 0 needs to be set separately. Set other planes in a loop */
+    bufDef.planes_buf.planes[0].length = offset.mp[0].len;
+    bufDef.planes_buf.planes[0].m.userptr = (long unsigned int)mMemInfo[index].fd;
+    bufDef.planes_buf.planes[0].data_offset = offset.mp[0].offset;
+    bufDef.planes_buf.planes[0].reserved[0] = 0;
+    for (int i = 1; i < bufDef.planes_buf.num_planes; i++) {
+         bufDef.planes_buf.planes[i].length = offset.mp[i].len;
+         bufDef.planes_buf.planes[i].m.userptr = (long unsigned int)mMemInfo[i].fd;
+         bufDef.planes_buf.planes[i].data_offset = offset.mp[i].offset;
+         bufDef.planes_buf.planes[i].reserved[0] =
+                 bufDef.planes_buf.planes[i-1].reserved[0] +
+                 bufDef.planes_buf.planes[i-1].length;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getUserBufDef
+ *
+ * DESCRIPTION: Fill buffer structure with user buffer information.
+ *              This also fills the individual stream buffers inside the batch buffer structure
+ *
+ * PARAMETERS :
+ *   @buf_info : user buffer information
+ *   @bufDef  : Buffer strcuture to fill user buf info
+ *   @index   : index of the buffer
+ *   @plane_offset : [input] frame buffer offset of the individual plane buffers
+ *   @planeBufDef  : [output] array of buffer definitions to fill for the plane buffers
+ *   @bufs    : Stream Buffer object
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraMemory::getUserBufDef(const cam_stream_user_buf_info_t &buf_info,
+        mm_camera_buf_def_t &bufDef,
+        uint32_t index,
+        const cam_frame_len_offset_t &plane_offset,
+        mm_camera_buf_def_t *planeBufDef,
+        QCameraMemory *bufs) const
+{
+    struct msm_camera_user_buf_cont_t *cont_buf = NULL;
+    uint32_t plane_idx = (index * buf_info.frame_buf_cnt);
+
+    if (!mBufferCount) {
+        ALOGE("Memory not allocated");
+        return INVALID_OPERATION;
+    }
+
+    for (int count = 0; count < mBufferCount; count++) {
+        bufDef.fd = mMemInfo[count].fd;
+        bufDef.buf_type = CAM_STREAM_BUF_TYPE_USERPTR;
+        bufDef.frame_len = buf_info.size;
+        bufDef.mem_info = (void *)this;
+        bufDef.buffer = (void *)((uint8_t *)getPtr(count)
+                + (index * buf_info.size));
+        bufDef.buf_idx = index;
+        bufDef.user_buf.num_buffers = (int8_t)buf_info.frame_buf_cnt;
+        bufDef.user_buf.bufs_used = (int8_t)buf_info.frame_buf_cnt;
+
+        //Individual plane buffer structure to be filled
+        cont_buf = (struct msm_camera_user_buf_cont_t *)bufDef.buffer;
+        cont_buf->buf_cnt = bufDef.user_buf.num_buffers;
+
+        for (int i = 0; i < bufDef.user_buf.num_buffers; i++) {
+            bufs->getBufDef(plane_offset, planeBufDef[plane_idx], plane_idx);
+            bufDef.user_buf.buf_idx[i] = -1;
+            cont_buf->buf_idx[i] = planeBufDef[plane_idx].buf_idx;
+            plane_idx++;
+        }
+        bufDef.user_buf.plane_buf = planeBufDef;
+
+        CDBG("%s: num_buf = %d index = %d plane_idx = %d",
+                __func__, bufDef.user_buf.num_buffers, index, plane_idx);
+    }
+    return NO_ERROR;
+}
+
+
+/*===========================================================================
+ * FUNCTION   : traceLogAllocStart
+ *
+ * DESCRIPTION: log and trace the start of a buffer allocation
+ *
+ * PARAMETERS :
+ *   @size      : [input] size of each buffer to be allocated
+ *   @count     : [input] number of buffers
+ *   @allocName : [input] name tag used for the allocation trace
+ *
+ * RETURN     : none
+ *==========================================================================*/
+inline void QCameraMemory::traceLogAllocStart(size_t size, int count, const char *allocName)
+{
+    ALOGD("%s : alloc E count=%d size=%zu", __func__, count, size);
+#ifdef ATRACE_TAG_CAMERA
+    char atracer[30];
+    if ((size * (size_t)count) > MEMLOG_THRESH) {
+        snprintf(atracer,sizeof(atracer), "%s %zu",allocName, size);
+        ATRACE_BEGIN(atracer);
+        ALOGE("%s:%s", __func__, atracer);
+    } else {
+        ATRACE_CALL();
+    }
+#endif
+}
+
+/*===========================================================================
+ * FUNCTION   : traceLogAllocEnd
+ *
+ * DESCRIPTION: log and trace the end of a buffer allocation
+ *
+ * PARAMETERS :
+ *   @size  : [input] total size of the allocation
+ *
+ * RETURN     : none
+ *==========================================================================*/
+inline void QCameraMemory::traceLogAllocEnd(size_t size)
+{
+    ALOGD(" %s : X", __func__);
+#ifdef ATRACE_TAG_CAMERA
+    if (size > MEMLOG_THRESH) {
+        ATRACE_END();
+        ALOGE("%s %zu", __func__, size);
+    }
+#endif
+}
+
+/*===========================================================================
+ * FUNCTION   : alloc
+ *
+ * DESCRIPTION: allocate requested number of buffers of certain size
+ *
+ * PARAMETERS :
+ *   @count   : number of buffers to be allocated
+ *   @size    : length of the buffer to be allocated
+ *   @heap_id : heap id to indicate where the buffers will be allocated from
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraMemory::alloc(int count, size_t size, unsigned int heap_id,
+        uint32_t secure_mode)
+{
+    int rc = OK;
+
+    int new_bufCnt = mBufferCount + count;
+    traceLogAllocStart(size, count, "Memsize");
+
+    if (new_bufCnt > MM_CAMERA_MAX_NUM_FRAMES) {
+        ALOGE("%s: Buffer count %d out of bound. Max is %d",
+              __func__, new_bufCnt, MM_CAMERA_MAX_NUM_FRAMES);
+        return BAD_INDEX;
+    }
+
+    for (int i = mBufferCount; i < new_bufCnt; i ++) {
+        if ( NULL == mMemoryPool ) {
+            CDBG_HIGH("%s : No memory pool available, allocating now", __func__);
+            rc = allocOneBuffer(mMemInfo[i], heap_id, size, m_bCached,
+                     secure_mode);
+            if (rc < 0) {
+                ALOGE("%s: AllocateIonMemory failed", __func__);
+                for (int j = i-1; j >= 0; j--)
+                    deallocOneBuffer(mMemInfo[j]);
+                break;
+            }
+        } else {
+            rc = mMemoryPool->allocateBuffer(mMemInfo[i],
+                                             heap_id,
+                                             size,
+                                             m_bCached,
+                                             mStreamType,
+                                             secure_mode);
+            if (rc < 0) {
+                ALOGE("%s: Memory pool allocation failed", __func__);
+                for (int j = i-1; j >= 0; j--)
+                    mMemoryPool->releaseBuffer(mMemInfo[j],
+                                               mStreamType);
+                break;
+            }
+        }
+
+    }
+    traceLogAllocEnd (size * (size_t)count);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : dealloc
+ *
+ * DESCRIPTION: deallocate buffers
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraMemory::dealloc()
+{
+    for (int i = 0; i < mBufferCount; i++) {
+        if ( NULL == mMemoryPool ) {
+            deallocOneBuffer(mMemInfo[i]);
+        } else {
+            mMemoryPool->releaseBuffer(mMemInfo[i], mStreamType);
+        }
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : allocOneBuffer
+ *
+ * DESCRIPTION: implementation of allocating one buffer of a certain size
+ *
+ * PARAMETERS :
+ *   @memInfo : [output] reference to struct to store additional memory allocation info
+ *   @heap_id : [input] heap id to indicate where the buffer will be allocated from
+ *   @size    : [input] length of the buffer to be allocated
+ *   @cached  : [input] flag whether buffer needs to be cached
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraMemory::allocOneBuffer(QCameraMemInfo &memInfo,
+        unsigned int heap_id, size_t size, bool cached, uint32_t secure_mode)
+{
+    int rc = OK;
+    struct ion_handle_data handle_data;
+    struct ion_allocation_data alloc;
+    struct ion_fd_data ion_info_fd;
+    int main_ion_fd = -1;
+
+    main_ion_fd = open("/dev/ion", O_RDONLY);
+    if (main_ion_fd < 0) {
+        ALOGE("Ion dev open failed: %s\n", strerror(errno));
+        goto ION_OPEN_FAILED;
+    }
+
+    memset(&alloc, 0, sizeof(alloc));
+    alloc.len = size;
+    /* to make it page size aligned */
+    alloc.len = (alloc.len + 4095U) & (~4095U);
+    alloc.align = 4096;
+    if (cached) {
+        alloc.flags = ION_FLAG_CACHED;
+    }
+    alloc.heap_id_mask = heap_id;
+    if (secure_mode == SECURE) {
+        ALOGD("%s: Allocate secure buffer\n", __func__);
+        alloc.flags = ION_SECURE;
+        alloc.heap_id_mask = ION_HEAP(ION_CP_MM_HEAP_ID);
+        alloc.align = 1048576; // 1 MiB alignment to be able to protect later
+        alloc.len = (alloc.len + 1048575U) & (~1048575U);
+    }
+
+    rc = ioctl(main_ion_fd, ION_IOC_ALLOC, &alloc);
+    if (rc < 0) {
+        ALOGE("ION allocation failed: %s\n", strerror(errno));
+        goto ION_ALLOC_FAILED;
+    }
+
+    memset(&ion_info_fd, 0, sizeof(ion_info_fd));
+    ion_info_fd.handle = alloc.handle;
+    rc = ioctl(main_ion_fd, ION_IOC_SHARE, &ion_info_fd);
+    if (rc < 0) {
+        ALOGE("ION map failed %s\n", strerror(errno));
+        goto ION_MAP_FAILED;
+    }
+
+    memInfo.main_ion_fd = main_ion_fd;
+    memInfo.fd = ion_info_fd.fd;
+    memInfo.handle = ion_info_fd.handle;
+    memInfo.size = alloc.len;
+    memInfo.cached = cached;
+    memInfo.heap_id = heap_id;
+
+    ALOGD("%s : ION buffer %lx with size %zu allocated",
+            __func__, (unsigned long)memInfo.handle, alloc.len);
+    return OK;
+
+ION_MAP_FAILED:
+    memset(&handle_data, 0, sizeof(handle_data));
+    handle_data.handle = ion_info_fd.handle;
+    ioctl(main_ion_fd, ION_IOC_FREE, &handle_data);
+ION_ALLOC_FAILED:
+    close(main_ion_fd);
+ION_OPEN_FAILED:
+    return NO_MEMORY;
+}
+
+/*===========================================================================
+ * FUNCTION   : deallocOneBuffer
+ *
+ * DESCRIPTION: implementation of deallocating one buffer
+ *
+ * PARAMETERS :
+ *   @memInfo : reference to struct that stores additional memory allocation info
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraMemory::deallocOneBuffer(QCameraMemInfo &memInfo)
+{
+    struct ion_handle_data handle_data;
+
+    if (memInfo.fd >= 0) {
+        close(memInfo.fd);
+        memInfo.fd = -1;
+    }
+
+    if (memInfo.main_ion_fd >= 0) {
+        memset(&handle_data, 0, sizeof(handle_data));
+        handle_data.handle = memInfo.handle;
+        ioctl(memInfo.main_ion_fd, ION_IOC_FREE, &handle_data);
+        close(memInfo.main_ion_fd);
+        memInfo.main_ion_fd = -1;
+    }
+    memInfo.handle = 0;
+    memInfo.size = 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : QCameraMemoryPool
+ *
+ * DESCRIPTION: default constructor of QCameraMemoryPool
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraMemoryPool::QCameraMemoryPool()
+{
+    pthread_mutex_init(&mLock, NULL);
+}
+
+
+/*===========================================================================
+ * FUNCTION   : ~QCameraMemoryPool
+ *
+ * DESCRIPTION: destructor of QCameraMemoryPool
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraMemoryPool::~QCameraMemoryPool()
+{
+    clear();
+    pthread_mutex_destroy(&mLock);
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseBuffer
+ *
+ * DESCRIPTION: release one buffer back into the pool of cached buffers
+ *
+ * PARAMETERS :
+ *   @memInfo : reference to struct that stores additional memory allocation info
+ *   @streamType: Type of stream the buffer belongs to
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraMemoryPool::releaseBuffer(
+        struct QCameraMemory::QCameraMemInfo &memInfo,
+        cam_stream_type_t streamType)
+{
+    pthread_mutex_lock(&mLock);
+
+    mPools[streamType].push_back(memInfo);
+
+    pthread_mutex_unlock(&mLock);
+}
+
+/*===========================================================================
+ * FUNCTION   : clear
+ *
+ * DESCRIPTION: clears all cached buffers
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraMemoryPool::clear()
+{
+    pthread_mutex_lock(&mLock);
+
+    for (int i = CAM_STREAM_TYPE_DEFAULT; i < CAM_STREAM_TYPE_MAX; i++ ) {
+        List<struct QCameraMemory::QCameraMemInfo>::iterator it;
+        it = mPools[i].begin();
+        for( ; it != mPools[i].end() ; it++) {
+            QCameraMemory::deallocOneBuffer(*it);
+        }
+
+        mPools[i].clear();
+    }
+
+    pthread_mutex_unlock(&mLock);
+}
+
+/*===========================================================================
+ * FUNCTION   : findBufferLocked
+ *
+ * DESCRIPTION: search for an appropriate cached buffer
+ *
+ * PARAMETERS :
+ *   @memInfo : reference to struct that stores additional memory allocation info
+ *   @heap_id : type of heap
+ *   @size    : size of the buffer
+ *   @cached  : whether the buffer should be cached
+ *   @streamType: type of stream this buffer belongs to
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraMemoryPool::findBufferLocked(
+        struct QCameraMemory::QCameraMemInfo &memInfo, unsigned int heap_id,
+        size_t size, bool cached, cam_stream_type_t streamType)
+{
+    int rc = NAME_NOT_FOUND;
+
+    if (mPools[streamType].empty()) {
+        return NAME_NOT_FOUND;
+    }
+
+    List<struct QCameraMemory::QCameraMemInfo>::iterator it;
+    it = mPools[streamType].begin();
+    for ( ; it != mPools[streamType].end() ; it++) {
+        if ( ((*it).size >= size) &&
+            ((*it).heap_id == heap_id) &&
+            ((*it).cached == cached) ) {
+            memInfo = *it;
+            mPools[streamType].erase(it);
+            rc = NO_ERROR;
+            break;
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : allocateBuffer
+ *
+ * DESCRIPTION: allocates a buffer from the memory pool,
+ *              it will re-use cached buffers if possible
+ *
+ * PARAMETERS :
+ *   @memInfo : reference to struct that stores additional memory allocation info
+ *   @heap_id : type of heap
+ *   @size    : size of the buffer
+ *   @cached  : whether the buffer should be cached
+ *   @streamType: type of stream this buffer belongs to
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraMemoryPool::allocateBuffer(
+        struct QCameraMemory::QCameraMemInfo &memInfo, unsigned int heap_id,
+        size_t size, bool cached, cam_stream_type_t streamType,
+        uint32_t secure_mode)
+{
+    int rc = NO_ERROR;
+
+    pthread_mutex_lock(&mLock);
+
+    rc = findBufferLocked(memInfo, heap_id, size, cached, streamType);
+    if (NAME_NOT_FOUND == rc ) {
+        CDBG_HIGH("%s : Buffer not found!", __func__);
+        rc = QCameraMemory::allocOneBuffer(memInfo, heap_id, size, cached,
+                 secure_mode);
+    }
+
+    pthread_mutex_unlock(&mLock);
+
+    return rc;
+}
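+
+// Pool usage pattern (sketch; variable names are assumptions): buffers returned
+// via releaseBuffer() are kept per stream type and handed back by allocateBuffer()
+// when heap id, size and cache setting match, avoiding a fresh ION allocation:
+//
+//   QCameraMemory::QCameraMemInfo memInfo;
+//   rc = pool.allocateBuffer(memInfo, heap_id, bufSize, /*cached*/ true,
+//           CAM_STREAM_TYPE_PREVIEW, NON_SECURE);
+//   ...
+//   pool.releaseBuffer(memInfo, CAM_STREAM_TYPE_PREVIEW);  // cached for reuse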
+
+/*===========================================================================
+ * FUNCTION   : QCameraHeapMemory
+ *
+ * DESCRIPTION: constructor of QCameraHeapMemory for ion memory used internally in HAL
+ *
+ * PARAMETERS :
+ *   @cached  : flag indicates if using cached memory
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraHeapMemory::QCameraHeapMemory(bool cached)
+    : QCameraMemory(cached)
+{
+    for (int i = 0; i < MM_CAMERA_MAX_NUM_FRAMES; i ++)
+        mPtr[i] = NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCameraHeapMemory
+ *
+ * DESCRIPTION: destructor of QCameraHeapMemory
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraHeapMemory::~QCameraHeapMemory()
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : getPtr
+ *
+ * DESCRIPTION: return buffer pointer
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : buffer ptr
+ *==========================================================================*/
+void *QCameraHeapMemory::getPtr(uint32_t index) const
+{
+    if (index >= mBufferCount) {
+        ALOGE("index out of bound");
+        return (void *)BAD_INDEX;
+    }
+    return mPtr[index];
+}
+
+/*===========================================================================
+ * FUNCTION   : allocate
+ *
+ * DESCRIPTION: allocate requested number of buffers of certain size
+ *
+ * PARAMETERS :
+ *   @count   : number of buffers to be allocated
+ *   @size    : length of the buffer to be allocated
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraHeapMemory::allocate(uint8_t count, size_t size, uint32_t isSecure)
+{
+    int rc = -1;
+    traceLogAllocStart(size, count, "HeapMemsize");
+    uint32_t heap_id_mask = 0x1 << ION_IOMMU_HEAP_ID;
+    if (isSecure == SECURE) {
+        rc = alloc(count, size, heap_id_mask, SECURE);
+        if (rc < 0)
+            return rc;
+    } else {
+        rc = alloc(count, size, heap_id_mask, NON_SECURE);
+        if (rc < 0)
+            return rc;
+
+        for (int i = 0; i < count; i ++) {
+            void *vaddr = mmap(NULL,
+                        mMemInfo[i].size,
+                        PROT_READ | PROT_WRITE,
+                        MAP_SHARED,
+                        mMemInfo[i].fd, 0);
+            if (vaddr == MAP_FAILED) {
+                for (int j = i-1; j >= 0; j --) {
+                    munmap(mPtr[j], mMemInfo[j].size);
+                    mPtr[j] = NULL;
+                    deallocOneBuffer(mMemInfo[j]);
+                }
+                return NO_MEMORY;
+            } else
+                mPtr[i] = vaddr;
+        }
+    }
+    if (rc == 0) {
+        mBufferCount = count;
+    }
+    traceLogAllocEnd((size * count));
+    return OK;
+}
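+
+// Example (sketch; the cached flag and the metadata-sized allocation are
+// assumptions): internal HAL buffers are typically obtained and accessed as
+//
+//   QCameraHeapMemory *buf = new QCameraHeapMemory(/*cached*/ true);
+//   if (buf->allocate(1, sizeof(metadata_buffer_t), NON_SECURE) == OK) {
+//       metadata_buffer_t *meta = (metadata_buffer_t *)buf->getPtr(0);
+//       ...
+//       buf->deallocate();
+//   }
+//   delete buf;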
+
+/*===========================================================================
+ * FUNCTION   : allocateMore
+ *
+ * DESCRIPTION: allocate more requested number of buffers of certain size
+ *
+ * PARAMETERS :
+ *   @count   : number of buffers to be allocated
+ *   @size    : length of the buffer to be allocated
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraHeapMemory::allocateMore(uint8_t count, size_t size)
+{
+    traceLogAllocStart(size, count, "HeapMemsize");
+    unsigned int heap_id_mask = 0x1 << ION_IOMMU_HEAP_ID;
+    int rc = alloc(count, size, heap_id_mask, NON_SECURE);
+    if (rc < 0)
+        return rc;
+
+    for (int i = mBufferCount; i < count + mBufferCount; i ++) {
+        void *vaddr = mmap(NULL,
+                    mMemInfo[i].size,
+                    PROT_READ | PROT_WRITE,
+                    MAP_SHARED,
+                    mMemInfo[i].fd, 0);
+        if (vaddr == MAP_FAILED) {
+            for (int j = i-1; j >= mBufferCount; j --) {
+                munmap(mPtr[j], mMemInfo[j].size);
+                mPtr[j] = NULL;
+                deallocOneBuffer(mMemInfo[j]);
+            }
+            return NO_MEMORY;
+        } else {
+            mPtr[i] = vaddr;
+        }
+    }
+    mBufferCount = (uint8_t)(mBufferCount + count);
+    traceLogAllocEnd((size * count));
+    return OK;
+}
+
+/*===========================================================================
+ * FUNCTION   : deallocate
+ *
+ * DESCRIPTION: deallocate buffers
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraHeapMemory::deallocate()
+{
+    for (int i = 0; i < mBufferCount; i++) {
+        munmap(mPtr[i], mMemInfo[i].size);
+        mPtr[i] = NULL;
+    }
+    dealloc();
+    mBufferCount = 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : cacheOps
+ *
+ * DESCRIPTION: ion related memory cache operations
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *   @cmd     : cache ops command
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraHeapMemory::cacheOps(uint32_t index, unsigned int cmd)
+{
+    if (index >= mBufferCount)
+        return BAD_INDEX;
+    return cacheOpsInternal(index, cmd, mPtr[index]);
+}
+
+/*===========================================================================
+ * FUNCTION   : getRegFlags
+ *
+ * DESCRIPTION: query initial reg flags
+ *
+ * PARAMETERS :
+ *   @regFlags: initial reg flags of the allocated buffers
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraHeapMemory::getRegFlags(uint8_t * /*regFlags*/) const
+{
+    return INVALID_OPERATION;
+}
+
+/*===========================================================================
+ * FUNCTION   : getMemory
+ *
+ * DESCRIPTION: get camera memory
+ *
+ * PARAMETERS :
+ *   @index   : buffer index
+ *   @metadata: flag if it's metadata
+ *
+ * RETURN     : camera memory ptr
+ *              NULL if not supported or failed
+ *==========================================================================*/
+camera_memory_t *QCameraHeapMemory::getMemory(uint32_t /*index*/, bool /*metadata*/) const
+{
+    return NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : getMatchBufIndex
+ *
+ * DESCRIPTION: query buffer index by opaque ptr
+ *
+ * PARAMETERS :
+ *   @opaque  : opaque ptr
+ *   @metadata: flag if it's metadata
+ *
+ * RETURN     : buffer index if match found,
+ *              -1 if failed
+ *==========================================================================*/
+int QCameraHeapMemory::getMatchBufIndex(const void *opaque,
+                                        bool metadata) const
+{
+    int index = -1;
+    if (metadata) {
+        return -1;
+    }
+    for (int i = 0; i < mBufferCount; i++) {
+        if (mPtr[i] == opaque) {
+            index = i;
+            break;
+        }
+    }
+    return index;
+}
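+
+/* Lookup sketch (hypothetical caller; the frame pointer would normally come
+ * back from mm-camera-interface inside a mm_camera_buf_def_t):
+ *
+ *   int idx = heapMem.getMatchBufIndex(bufDef.buffer, false);
+ *   if (idx >= 0)
+ *       heapMem.invalidateCache((uint32_t)idx);   // refresh the CPU view
+ */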
+
+/*===========================================================================
+ * FUNCTION   : QCameraStreamMemory
+ *
+ * DESCRIPTION: constructor of QCameraStreamMemory
+ *              ION memory allocated directly from /dev/ion and shared with the framework
+ *
+ * PARAMETERS :
+ *   @memory    : camera memory request ops table
+ *   @cached    : flag indicates if using cached memory
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraStreamMemory::QCameraStreamMemory(camera_request_memory memory,
+        bool cached,
+        QCameraMemoryPool *pool,
+        cam_stream_type_t streamType, cam_stream_buf_type bufType)
+    :QCameraMemory(cached, pool, streamType),
+     mGetMemory(memory)
+{
+    for (int i = 0; i < MM_CAMERA_MAX_NUM_FRAMES; i ++)
+        mCameraMemory[i] = NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCameraStreamMemory
+ *
+ * DESCRIPTION: destructor of QCameraStreamMemory
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraStreamMemory::~QCameraStreamMemory()
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : allocate
+ *
+ * DESCRIPTION: allocate requested number of buffers of certain size
+ *
+ * PARAMETERS :
+ *   @count   : number of buffers to be allocated
+ *   @size    : length of the buffer to be allocated
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraStreamMemory::allocate(uint8_t count, size_t size, uint32_t isSecure)
+{
+    traceLogAllocStart(size, count, "StreamMemsize");
+    unsigned int heap_id_mask = 0x1 << ION_IOMMU_HEAP_ID;
+    int rc = alloc(count, size, heap_id_mask, isSecure);
+    if (rc < 0)
+        return rc;
+
+    for (int i = 0; i < count; i ++) {
+        if (isSecure == SECURE) {
+            mCameraMemory[i] = 0;
+        } else {
+            mCameraMemory[i] = mGetMemory(mMemInfo[i].fd, mMemInfo[i].size, 1, this);
+        }
+    }
+    mBufferCount = count;
+    traceLogAllocEnd((size * count));
+    return NO_ERROR;
+}
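+
+/* Usage sketch, illustrative only -- requestMemoryFn and frameLen stand in for
+ * the framework's camera_request_memory callback and the stream frame length:
+ *
+ *   QCameraStreamMemory streamMem(requestMemoryFn, true);
+ *   if (streamMem.allocate(6, frameLen, NON_SECURE) == NO_ERROR) {
+ *       camera_memory_t *mem = streamMem.getMemory(0, false);
+ *       // mem->data aliases the same ION buffer the hardware writes into
+ *   }
+ */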
+
+/*===========================================================================
+ * FUNCTION   : allocateMore
+ *
+ * DESCRIPTION: allocate more requested number of buffers of certain size
+ *
+ * PARAMETERS :
+ *   @count   : number of buffers to be allocated
+ *   @size    : length of the buffer to be allocated
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraStreamMemory::allocateMore(uint8_t count, size_t size)
+{
+    traceLogAllocStart(size, count, "StreamMemsize");
+    unsigned int heap_id_mask = 0x1 << ION_IOMMU_HEAP_ID;
+    int rc = alloc(count, size, heap_id_mask, NON_SECURE);
+    if (rc < 0)
+        return rc;
+
+    for (int i = mBufferCount; i < mBufferCount + count; i++) {
+        mCameraMemory[i] = mGetMemory(mMemInfo[i].fd, mMemInfo[i].size, 1, this);
+    }
+    mBufferCount = (uint8_t)(mBufferCount + count);
+    traceLogAllocEnd((size * count));
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : deallocate
+ *
+ * DESCRIPTION: deallocate buffers
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraStreamMemory::deallocate()
+{
+    for (int i = 0; i < mBufferCount; i ++) {
+        if (mCameraMemory[i])
+            mCameraMemory[i]->release(mCameraMemory[i]);
+        mCameraMemory[i] = NULL;
+    }
+    dealloc();
+    mBufferCount = 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : cacheOps
+ *
+ * DESCRIPTION: ion related memory cache operations
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *   @cmd     : cache ops command
+ *
+ * RETURN     : int32_t type of status
+ *              non-zero failure code
+ *              none-zero failure code
+ *==========================================================================*/
+int QCameraStreamMemory::cacheOps(uint32_t index, unsigned int cmd)
+{
+    if (index >= mBufferCount)
+        return BAD_INDEX;
+    return cacheOpsInternal(index, cmd, mCameraMemory[index]->data);
+}
+
+/*===========================================================================
+ * FUNCTION   : getRegFlags
+ *
+ * DESCRIPTION: query initial reg flags
+ *
+ * PARAMETERS :
+ *   @regFlags: initial reg flags of the allocated buffers
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraStreamMemory::getRegFlags(uint8_t *regFlags) const
+{
+    for (int i = 0; i < mBufferCount; i ++)
+        regFlags[i] = 1;
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : getMemory
+ *
+ * DESCRIPTION: get camera memory
+ *
+ * PARAMETERS :
+ *   @index   : buffer index
+ *   @metadata: flag if it's metadata
+ *
+ * RETURN     : camera memory ptr
+ *              NULL if not supported or failed
+ *==========================================================================*/
+camera_memory_t *QCameraStreamMemory::getMemory(uint32_t index,
+        bool metadata) const
+{
+    if (index >= mBufferCount || metadata)
+        return NULL;
+    return mCameraMemory[index];
+}
+
+/*===========================================================================
+ * FUNCTION   : getMatchBufIndex
+ *
+ * DESCRIPTION: query buffer index by opaque ptr
+ *
+ * PARAMETERS :
+ *   @opaque  : opaque ptr
+ *   @metadata: flag if it's metadata
+ *
+ * RETURN     : buffer index if match found,
+ *              -1 if failed
+ *==========================================================================*/
+int QCameraStreamMemory::getMatchBufIndex(const void *opaque,
+                                          bool metadata) const
+{
+    int index = -1;
+    if (metadata) {
+        return -1;
+    }
+    for (int i = 0; i < mBufferCount; i++) {
+        if (mCameraMemory[i]->data == opaque) {
+            index = i;
+            break;
+        }
+    }
+    return index;
+}
+
+/*===========================================================================
+ * FUNCTION   : getPtr
+ *
+ * DESCRIPTION: return buffer pointer
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : buffer ptr
+ *==========================================================================*/
+void *QCameraStreamMemory::getPtr(uint32_t index) const
+{
+    if (index >= mBufferCount) {
+        ALOGE("index out of bound");
+        return (void *)BAD_INDEX;
+    }
+    if (mCameraMemory[index] == 0) {
+        return NULL;
+    }
+    return mCameraMemory[index]->data;
+}
+
+/*===========================================================================
+ * FUNCTION   : QCameraVideoMemory
+ *
+ * DESCRIPTION: constructor of QCameraVideoMemory
+ *              Video stream buffers also include metadata buffers
+ *
+ * PARAMETERS :
+ *   @memory    : camera memory request ops table
+ *   @cached    : flag indicates if using cached ION memory
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraVideoMemory::QCameraVideoMemory(camera_request_memory memory,
+                                       bool cached, cam_stream_buf_type bufType)
+    : QCameraStreamMemory(memory, cached)
+{
+    memset(mMetadata, 0, sizeof(mMetadata));
+    mMetaBufCount = 0;
+    mBufType = bufType;
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCameraVideoMemory
+ *
+ * DESCRIPTION: destructor of QCameraVideoMemory
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraVideoMemory::~QCameraVideoMemory()
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : allocate
+ *
+ * DESCRIPTION: allocate requested number of buffers of certain size
+ *
+ * PARAMETERS :
+ *   @count   : number of buffers to be allocated
+ *   @size    : length of the buffer to be allocated
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraVideoMemory::allocate(uint8_t count, size_t size, uint32_t isSecure)
+{
+    traceLogAllocStart(size, count, "VideoMemsize");
+    int rc = QCameraStreamMemory::allocate(count, size, isSecure);
+    if (rc < 0)
+        return rc;
+
+    if (mBufType != CAM_STREAM_BUF_TYPE_USERPTR) {
+        rc = allocateMeta(count);
+        if (rc != NO_ERROR) {
+            return rc;
+        }
+
+        for (int i = 0; i < count; i ++) {
+            struct encoder_media_buffer_type * packet =
+                    (struct encoder_media_buffer_type *)mMetadata[i]->data;
+            // 1 fd, 1 offset, 1 size, 1 color format flag
+            packet->meta_handle = native_handle_create(1, 3);
+            packet->buffer_type = kMetadataBufferTypeCameraSource;
+            native_handle_t * nh = const_cast<native_handle_t *>(packet->meta_handle);
+            if (!nh) {
+                ALOGE("%s: Error in getting video native handle", __func__);
+                return NO_MEMORY;
+            }
+            nh->data[0] = mMemInfo[i].fd;
+            nh->data[1] = 0;
+            nh->data[2] = (int)mMemInfo[i].size;
+            nh->data[3] = private_handle_t::PRIV_FLAGS_ITU_R_709;
+        }
+    }
+    mBufferCount = count;
+    traceLogAllocEnd((size * count));
+    return NO_ERROR;
+}
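+
+/* Layout sketch of the encoder metadata packed above, for one buffer i
+ * (videoMem is a placeholder instance, not a name used in this change):
+ *
+ *   encoder_media_buffer_type *meta =
+ *       (encoder_media_buffer_type *)videoMem.getMemory(i, true)->data;
+ *   // meta->buffer_type        == kMetadataBufferTypeCameraSource
+ *   // meta->meta_handle->data: [0] ION fd   [1] offset (0)
+ *   //                          [2] size     [3] PRIV_FLAGS_ITU_R_709
+ */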
+
+/*===========================================================================
+ * FUNCTION   : allocateMore
+ *
+ * DESCRIPTION: allocate more requested number of buffers of certain size
+ *
+ * PARAMETERS :
+ *   @count   : number of buffers to be allocated
+ *   @size    : length of the buffer to be allocated
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraVideoMemory::allocateMore(uint8_t count, size_t size)
+{
+    traceLogAllocStart(size, count, "VideoMemsize");
+    int rc = QCameraStreamMemory::allocateMore(count, size);
+    if (rc < 0)
+        return rc;
+
+    if (mBufType != CAM_STREAM_BUF_TYPE_USERPTR) {
+        for (int i = mBufferCount; i < count + mBufferCount; i ++) {
+            mMetadata[i] = mGetMemory(-1,
+                    sizeof(struct encoder_media_buffer_type), 1, this);
+            if (!mMetadata[i]) {
+                ALOGE("allocation of video metadata failed.");
+                for (int j = mBufferCount; j <= i-1; j ++) {
+                    mMetadata[j]->release(mMetadata[j]);
+                    mCameraMemory[j]->release(mCameraMemory[j]);
+                    mCameraMemory[j] = NULL;
+                    deallocOneBuffer(mMemInfo[j]);
+                }
+                return NO_MEMORY;
+            }
+            struct encoder_media_buffer_type * packet =
+                    (struct encoder_media_buffer_type *)mMetadata[i]->data;
+            packet->meta_handle = native_handle_create(1, 2); // 1 fd, 1 offset, 1 size
+            packet->buffer_type = kMetadataBufferTypeCameraSource;
+            native_handle_t * nh = const_cast<native_handle_t *>(packet->meta_handle);
+            if (!nh) {
+                ALOGE("%s: Error in getting video native handle", __func__);
+                return NO_MEMORY;
+            }
+            nh->data[0] = mMemInfo[i].fd;
+            nh->data[1] = 0;
+            nh->data[2] = (int)mMemInfo[i].size;
+        }
+    }
+    mBufferCount = (uint8_t)(mBufferCount + count);
+    mMetaBufCount = mBufferCount;
+    traceLogAllocEnd((size * count));
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : allocateMeta
+ *
+ * DESCRIPTION: allocate video encoder metadata structure
+ *
+ * PARAMETERS :
+ *   @buf_cnt : number of metadata buffers to be allocated
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraVideoMemory::allocateMeta(uint8_t buf_cnt)
+{
+    int rc = NO_ERROR;
+
+    for (int i = 0; i < buf_cnt; i++) {
+        mMetadata[i] = mGetMemory(-1,
+                sizeof(struct encoder_media_buffer_type), 1, this);
+        if (!mMetadata[i]) {
+            ALOGE("allocation of video metadata failed.");
+            for (int j = (i - 1); j >= 0; j--) {
+                mMetadata[j]->release(mMetadata[j]);
+            }
+            return NO_MEMORY;
+        }
+    }
+    mMetaBufCount = buf_cnt;
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : deallocateMeta
+ *
+ * DESCRIPTION: deallocate video metadata buffers
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraVideoMemory::deallocateMeta()
+{
+    for (int i = 0; i < mMetaBufCount; i ++) {
+        mMetadata[i]->release(mMetadata[i]);
+        mMetadata[i] = NULL;
+    }
+    mMetaBufCount = 0;
+}
+
+
+/*===========================================================================
+ * FUNCTION   : deallocate
+ *
+ * DESCRIPTION: deallocate buffers
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraVideoMemory::deallocate()
+{
+    if (mBufType != CAM_STREAM_BUF_TYPE_USERPTR) {
+        for (int i = 0; i < mBufferCount; i ++) {
+            struct encoder_media_buffer_type * packet =
+                    (struct encoder_media_buffer_type *)mMetadata[i]->data;
+            if (NULL != packet) {
+                native_handle_t * nh = const_cast<native_handle_t *>(packet->meta_handle);
+                if (NULL != nh) {
+                   if (native_handle_delete(nh)) {
+                       ALOGE("Unable to delete native handle");
+                   }
+                } else {
+                   ALOGE("native handle not available");
+                }
+            } else {
+                ALOGE("packet not available");
+            }
+        }
+    }
+
+    deallocateMeta();
+
+    QCameraStreamMemory::deallocate();
+    mBufferCount = 0;
+    mMetaBufCount = 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : getMemory
+ *
+ * DESCRIPTION: get camera memory
+ *
+ * PARAMETERS :
+ *   @index   : buffer index
+ *   @metadata: flag if it's metadata
+ *
+ * RETURN     : camera memory ptr
+ *              NULL if not supported or failed
+ *==========================================================================*/
+camera_memory_t *QCameraVideoMemory::getMemory(uint32_t index,
+        bool metadata) const
+{
+    if (index >= mMetaBufCount || (!metadata && index >= mBufferCount))
+        return NULL;
+
+    if (metadata)
+        return mMetadata[index];
+    else
+        return mCameraMemory[index];
+}
+
+/*===========================================================================
+ * FUNCTION   : getMatchBufIndex
+ *
+ * DESCRIPTION: query buffer index by opaque ptr
+ *
+ * PARAMETERS :
+ *   @opaque  : opaque ptr
+ *   @metadata: flag if it's metadata
+ *
+ * RETURN     : buffer index if match found,
+ *              -1 if failed
+ *==========================================================================*/
+int QCameraVideoMemory::getMatchBufIndex(const void *opaque,
+                                         bool metadata) const
+{
+    int index = -1;
+
+    if (metadata) {
+        for (int i = 0; i < mMetaBufCount; i++) {
+            if (mMetadata[i]->data == opaque) {
+                index = i;
+                break;
+            }
+        }
+    } else {
+        for (int i = 0; i < mBufferCount; i++) {
+            if (mCameraMemory[i]->data == opaque) {
+                index = i;
+                break;
+            }
+        }
+    }
+    return index;
+}
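+
+/* Lookup sketch for a recording frame handed back by the framework
+ * (hypothetical caller; 'opaque' is whatever pointer the app returned):
+ *
+ *   int idx = videoMem.getMatchBufIndex(opaque, true);    // metadata buffer?
+ *   if (idx < 0)
+ *       idx = videoMem.getMatchBufIndex(opaque, false);   // plain YUV buffer?
+ */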
+
+/*===========================================================================
+ * FUNCTION   : QCameraGrallocMemory
+ *
+ * DESCRIPTION: constructor of QCameraGrallocMemory
+ *              preview stream buffers are allocated from the gralloc native window
+ *
+ * PARAMETERS :
+ *   @memory    : camera memory request ops table
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraGrallocMemory::QCameraGrallocMemory(camera_request_memory memory)
+        : QCameraMemory(true), mColorSpace(ITU_R_601_FR)
+{
+    mMinUndequeuedBuffers = 0;
+    mWindow = NULL;
+    mWidth = mHeight = mStride = mScanline = 0;
+    mFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP;
+    mGetMemory = memory;
+    for (int i = 0; i < MM_CAMERA_MAX_NUM_FRAMES; i ++) {
+        mBufferHandle[i] = NULL;
+        mLocalFlag[i] = BUFFER_NOT_OWNED;
+        mPrivateHandle[i] = NULL;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCameraGrallocMemory
+ *
+ * DESCRIPTION: destructor of QCameraGrallocMemory
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraGrallocMemory::~QCameraGrallocMemory()
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : setWindowInfo
+ *
+ * DESCRIPTION: set native window gralloc ops table
+ *
+ * PARAMETERS :
+ *   @window  : gralloc ops table ptr
+ *   @width   : width of preview frame
+ *   @height  : height of preview frame
+ *   @stride  : stride of preview frame
+ *   @scanline: scanline of preview frame
+ *   @format  : format of preview image
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraGrallocMemory::setWindowInfo(preview_stream_ops_t *window,
+        int width, int height, int stride, int scanline, int format)
+{
+    mWindow = window;
+    mWidth = width;
+    mHeight = height;
+    mStride = stride;
+    mScanline = scanline;
+    mFormat = format;
+}
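+
+/* Note: setWindowInfo() needs to run before allocate() below -- allocate()
+ * returns INVALID_OPERATION while mWindow is still NULL and sizes the
+ * dequeued buffers from the mStride/mScanline/mFormat recorded here. */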
+
+/*===========================================================================
+ * FUNCTION   : displayBuffer
+ *
+ * DESCRIPTION: send received frame to display
+ *
+ * PARAMETERS :
+ *   @index   : index of preview frame
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraGrallocMemory::displayBuffer(uint32_t index)
+{
+    int err = NO_ERROR;
+    int dequeuedIdx = BAD_INDEX;
+
+    if (BUFFER_NOT_OWNED == mLocalFlag[index]) {
+        ALOGE("%s: buffer to be enqueued is not owned", __func__);
+        return INVALID_OPERATION;
+    }
+
+    err = mWindow->enqueue_buffer(mWindow, (buffer_handle_t *)mBufferHandle[index]);
+    if(err != 0) {
+        ALOGE("%s: enqueue_buffer failed, err = %d", __func__, err);
+    } else {
+        CDBG("%s: enqueue_buffer hdl=%p", __func__, *mBufferHandle[index]);
+        mLocalFlag[index] = BUFFER_NOT_OWNED;
+    }
+
+    buffer_handle_t *buffer_handle = NULL;
+    int stride = 0;
+    err = mWindow->dequeue_buffer(mWindow, &buffer_handle, &stride);
+    if (err == NO_ERROR && buffer_handle != NULL) {
+        int i;
+        CDBG("%s: dequeued buf hdl =%p", __func__, *buffer_handle);
+        for(i = 0; i < mBufferCount; i++) {
+            if(mBufferHandle[i] == buffer_handle) {
+                CDBG("%s: Found buffer in idx:%d", __func__, i);
+                mLocalFlag[i] = BUFFER_OWNED;
+                dequeuedIdx = i;
+                break;
+            }
+        }
+    } else {
+        CDBG_HIGH("%s: dequeue_buffer, no free buffer from display now", __func__);
+    }
+    return dequeuedIdx;
+}
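+
+/* Usage sketch of the preview loop, illustrative only (grallocMem and readyIdx
+ * are placeholders):
+ *
+ *   int freeIdx = grallocMem.displayBuffer(readyIdx);   // enqueue, then dequeue
+ *   if (freeIdx >= 0) {
+ *       // buffer freeIdx is owned again and can be queued back to the ISP
+ *   }   // otherwise the display is still holding every buffer
+ */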
+
+/*===========================================================================
+ * FUNCTION   : allocate
+ *
+ * DESCRIPTION: allocate requested number of buffers of certain size
+ *
+ * PARAMETERS :
+ *   @count   : number of buffers to be allocated
+ *   @size    : length of the buffer to be allocated
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraGrallocMemory::allocate(uint8_t count, size_t /*size*/,
+        uint32_t /*isSecure*/)
+{
+    traceLogAllocStart(0, count, "Grallocbufcnt");
+    int err = 0;
+    status_t ret = NO_ERROR;
+    int gralloc_usage = 0;
+    struct ion_fd_data ion_info_fd;
+    memset(&ion_info_fd, 0, sizeof(ion_info_fd));
+
+    CDBG(" %s : E ", __func__);
+
+    if (!mWindow) {
+        ALOGE("Invalid native window");
+        return INVALID_OPERATION;
+    }
+
+    // Increment buffer count by min undequeued buffer.
+    err = mWindow->get_min_undequeued_buffer_count(mWindow,&mMinUndequeuedBuffers);
+    if (err != 0) {
+        ALOGE("get_min_undequeued_buffer_count  failed: %s (%d)",
+                strerror(-err), -err);
+        ret = UNKNOWN_ERROR;
+        goto end;
+    }
+
+    err = mWindow->set_buffer_count(mWindow, count);
+    if (err != 0) {
+         ALOGE("set_buffer_count failed: %s (%d)",
+                    strerror(-err), -err);
+         ret = UNKNOWN_ERROR;
+         goto end;
+    }
+
+    err = mWindow->set_buffers_geometry(mWindow, mStride, mScanline, mFormat);
+    if (err != 0) {
+         ALOGE("%s: set_buffers_geometry failed: %s (%d)",
+               __func__, strerror(-err), -err);
+         ret = UNKNOWN_ERROR;
+         goto end;
+    }
+
+    err = mWindow->set_crop(mWindow, 0, 0, mWidth, mHeight);
+    if (err != 0) {
+         ALOGE("%s: set_crop failed: %s (%d)",
+               __func__, strerror(-err), -err);
+         ret = UNKNOWN_ERROR;
+         goto end;
+    }
+
+    gralloc_usage = GRALLOC_USAGE_HW_CAMERA_WRITE | GRALLOC_USAGE_PRIVATE_IOMMU_HEAP;
+    err = mWindow->set_usage(mWindow, gralloc_usage);
+    if(err != 0) {
+        /* set_usage error out */
+        ALOGE("%s: set_usage rc = %d", __func__, err);
+        ret = UNKNOWN_ERROR;
+        goto end;
+    }
+    CDBG_HIGH("%s: usage = %d, geometry: %p, %d, %d, %d, %d, %d",
+          __func__, gralloc_usage, mWindow, mWidth, mHeight, mStride,
+          mScanline, mFormat);
+
+    //Allocate cnt number of buffers from native window
+    for (int cnt = 0; cnt < count; cnt++) {
+        int stride;
+        err = mWindow->dequeue_buffer(mWindow, &mBufferHandle[cnt], &stride);
+        if(!err) {
+            CDBG("dequeue buf hdl =%p", mBufferHandle[cnt]);
+            mLocalFlag[cnt] = BUFFER_OWNED;
+        } else {
+            mLocalFlag[cnt] = BUFFER_NOT_OWNED;
+            ALOGE("%s: dequeue_buffer idx = %d err = %d", __func__, cnt, err);
+        }
+
+        CDBG("%s: dequeue buf: %p\n", __func__, mBufferHandle[cnt]);
+
+        if(err != 0) {
+            ALOGE("%s: dequeue_buffer failed: %s (%d)",
+                  __func__, strerror(-err), -err);
+            ret = UNKNOWN_ERROR;
+            for(int i = 0; i < cnt; i++) {
+                if(mLocalFlag[i] != BUFFER_NOT_OWNED) {
+                    err = mWindow->cancel_buffer(mWindow, mBufferHandle[i]);
+                    CDBG_HIGH("%s: cancel_buffer: hdl =%p", __func__, (*mBufferHandle[i]));
+                }
+                mLocalFlag[i] = BUFFER_NOT_OWNED;
+                mBufferHandle[i] = NULL;
+            }
+            reset();
+            goto end;
+        }
+
+        mPrivateHandle[cnt] =
+            (struct private_handle_t *)(*mBufferHandle[cnt]);
+        mMemInfo[cnt].main_ion_fd = open("/dev/ion", O_RDONLY);
+        if (mMemInfo[cnt].main_ion_fd < 0) {
+            ALOGE("%s: failed: could not open ion device", __func__);
+            for(int i = 0; i < cnt; i++) {
+                struct ion_handle_data ion_handle;
+                memset(&ion_handle, 0, sizeof(ion_handle));
+                ion_handle.handle = mMemInfo[i].handle;
+                if (ioctl(mMemInfo[i].main_ion_fd, ION_IOC_FREE, &ion_handle) < 0) {
+                    ALOGE("%s: ion free failed", __func__);
+                }
+                close(mMemInfo[i].main_ion_fd);
+                if(mLocalFlag[i] != BUFFER_NOT_OWNED) {
+                    err = mWindow->cancel_buffer(mWindow, mBufferHandle[i]);
+                    CDBG_HIGH("%s: cancel_buffer: hdl =%p", __func__, (*mBufferHandle[i]));
+                }
+                mLocalFlag[i] = BUFFER_NOT_OWNED;
+                mBufferHandle[i] = NULL;
+            }
+            reset();
+            ret = UNKNOWN_ERROR;
+            goto end;
+        } else {
+            ion_info_fd.fd = mPrivateHandle[cnt]->fd;
+            if (ioctl(mMemInfo[cnt].main_ion_fd,
+                      ION_IOC_IMPORT, &ion_info_fd) < 0) {
+                ALOGE("%s: ION import failed\n", __func__);
+                for(int i = 0; i < cnt; i++) {
+                    struct ion_handle_data ion_handle;
+                    memset(&ion_handle, 0, sizeof(ion_handle));
+                    ion_handle.handle = mMemInfo[i].handle;
+                    if (ioctl(mMemInfo[i].main_ion_fd, ION_IOC_FREE, &ion_handle) < 0) {
+                        ALOGE("ion free failed");
+                    }
+                    close(mMemInfo[i].main_ion_fd);
+
+                    if(mLocalFlag[i] != BUFFER_NOT_OWNED) {
+                        err = mWindow->cancel_buffer(mWindow, mBufferHandle[i]);
+                        CDBG_HIGH("%s: cancel_buffer: hdl =%p", __func__, (*mBufferHandle[i]));
+                    }
+                    mLocalFlag[i] = BUFFER_NOT_OWNED;
+                    mBufferHandle[i] = NULL;
+                }
+                close(mMemInfo[cnt].main_ion_fd);
+                reset();
+                ret = UNKNOWN_ERROR;
+                goto end;
+            }
+        }
+        setMetaData(mPrivateHandle[cnt], UPDATE_COLOR_SPACE, &mColorSpace);
+        mCameraMemory[cnt] =
+            mGetMemory(mPrivateHandle[cnt]->fd,
+                    (size_t)mPrivateHandle[cnt]->size,
+                    1,
+                    (void *)this);
+        CDBG_HIGH("%s: idx = %d, fd = %d, size = %d, offset = %d",
+              __func__, cnt, mPrivateHandle[cnt]->fd,
+              mPrivateHandle[cnt]->size,
+              mPrivateHandle[cnt]->offset);
+        mMemInfo[cnt].fd = mPrivateHandle[cnt]->fd;
+        mMemInfo[cnt].size = (size_t)mPrivateHandle[cnt]->size;
+        mMemInfo[cnt].handle = ion_info_fd.handle;
+    }
+    mBufferCount = count;
+
+    //Cancel min_undequeued_buffer buffers back to the window
+    for (int i = 0; i < mMinUndequeuedBuffers; i ++) {
+        err = mWindow->cancel_buffer(mWindow, mBufferHandle[i]);
+        mLocalFlag[i] = BUFFER_NOT_OWNED;
+    }
+
+end:
+    CDBG(" %s : X ",__func__);
+    traceLogAllocEnd(count);
+    return ret;
+}
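+
+/* Usage sketch, illustrative only -- the window, geometry and buffer count all
+ * come from the preview configuration, not from this change:
+ *
+ *   QCameraGrallocMemory grallocMem(requestMemoryFn);
+ *   grallocMem.setWindowInfo(previewWindow, width, height,
+ *                            stride, scanline, HAL_PIXEL_FORMAT_YCrCb_420_SP);
+ *   if (grallocMem.allocate(bufCnt, 0, NON_SECURE) == NO_ERROR) {
+ *       // the first mMinUndequeuedBuffers were cancelled back to the window
+ *       // above, so only the remaining indices start out owned by the HAL
+ *   }
+ */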
+
+
+/*===========================================================================
+ * FUNCTION   : allocateMore
+ *
+ * DESCRIPTION: allocate more requested number of buffers of certain size
+ *
+ * PARAMETERS :
+ *   @count   : number of buffers to be allocated
+ *   @size    : length of the buffer to be allocated
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraGrallocMemory::allocateMore(uint8_t /*count*/, size_t /*size*/)
+{
+    ALOGE("%s: Not implemented yet", __func__);
+    return UNKNOWN_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : deallocate
+ *
+ * DESCRIPTION: deallocate buffers
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraGrallocMemory::deallocate()
+{
+    CDBG("%s: E ", __FUNCTION__);
+
+    for (int cnt = 0; cnt < mBufferCount; cnt++) {
+        mCameraMemory[cnt]->release(mCameraMemory[cnt]);
+        struct ion_handle_data ion_handle;
+        memset(&ion_handle, 0, sizeof(ion_handle));
+        ion_handle.handle = mMemInfo[cnt].handle;
+        if (ioctl(mMemInfo[cnt].main_ion_fd, ION_IOC_FREE, &ion_handle) < 0) {
+            ALOGE("ion free failed");
+        }
+        close(mMemInfo[cnt].main_ion_fd);
+        if(mLocalFlag[cnt] != BUFFER_NOT_OWNED) {
+            if (mWindow) {
+                mWindow->cancel_buffer(mWindow, mBufferHandle[cnt]);
+                CDBG_HIGH("cancel_buffer: hdl =%p", (*mBufferHandle[cnt]));
+            } else {
+                ALOGE("Preview window is NULL, cannot cancel_buffer: hdl =%p",
+                      (*mBufferHandle[cnt]));
+            }
+        }
+        mLocalFlag[cnt] = BUFFER_NOT_OWNED;
+        CDBG_HIGH("put buffer %d successfully", cnt);
+    }
+    mBufferCount = 0;
+    CDBG(" %s : X ",__FUNCTION__);
+}
+
+/*===========================================================================
+ * FUNCTION   : cacheOps
+ *
+ * DESCRIPTION: ion related memory cache operations
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *   @cmd     : cache ops command
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraGrallocMemory::cacheOps(uint32_t index, unsigned int cmd)
+{
+    if (index >= mBufferCount)
+        return BAD_INDEX;
+    return cacheOpsInternal(index, cmd, mCameraMemory[index]->data);
+}
+
+/*===========================================================================
+ * FUNCTION   : getRegFlags
+ *
+ * DESCRIPTION: query initial reg flags
+ *
+ * PARAMETERS :
+ *   @regFlags: initial reg flags of the allocated buffers
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraGrallocMemory::getRegFlags(uint8_t *regFlags) const
+{
+    int i = 0;
+    for (i = 0; i < mMinUndequeuedBuffers; i ++)
+        regFlags[i] = 0;
+    for (; i < mBufferCount; i ++)
+        regFlags[i] = 1;
+    return NO_ERROR;
+}
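+
+/* Worked example (hypothetical numbers): with 8 window buffers and
+ * mMinUndequeuedBuffers == 2, regFlags comes back as {0,0,1,1,1,1,1,1} --
+ * the first two buffers were cancelled back to the display in allocate()
+ * above, so they are not flagged for initial registration. */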
+
+/*===========================================================================
+ * FUNCTION   : getMemory
+ *
+ * DESCRIPTION: get camera memory
+ *
+ * PARAMETERS :
+ *   @index   : buffer index
+ *   @metadata: flag if it's metadata
+ *
+ * RETURN     : camera memory ptr
+ *              NULL if not supported or failed
+ *==========================================================================*/
+camera_memory_t *QCameraGrallocMemory::getMemory(uint32_t index,
+        bool metadata) const
+{
+    if (index >= mBufferCount || metadata)
+        return NULL;
+    return mCameraMemory[index];
+}
+
+/*===========================================================================
+ * FUNCTION   : getMatchBufIndex
+ *
+ * DESCRIPTION: query buffer index by opaque ptr
+ *
+ * PARAMETERS :
+ *   @opaque  : opaque ptr
+ *   @metadata: flag if it's metadata
+ *
+ * RETURN     : buffer index if match found,
+ *              -1 if failed
+ *==========================================================================*/
+int QCameraGrallocMemory::getMatchBufIndex(const void *opaque,
+                                           bool metadata) const
+{
+    int index = -1;
+    if (metadata) {
+        return -1;
+    }
+    for (int i = 0; i < mBufferCount; i++) {
+        if (mCameraMemory[i]->data == opaque) {
+            index = i;
+            break;
+        }
+    }
+    return index;
+}
+
+/*===========================================================================
+ * FUNCTION   : getPtr
+ *
+ * DESCRIPTION: return buffer pointer
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : buffer ptr
+ *==========================================================================*/
+void *QCameraGrallocMemory::getPtr(uint32_t index) const
+{
+    if (index >= mBufferCount) {
+        ALOGE("index out of bound");
+        return (void *)BAD_INDEX;
+    }
+    return mCameraMemory[index]->data;
+}
+
+}; //namespace qcamera
diff --git a/camera/QCamera2/HAL/QCameraMem.h b/camera/QCamera2/HAL/QCameraMem.h
new file mode 100644
index 0000000..dbb401b
--- /dev/null
+++ b/camera/QCamera2/HAL/QCameraMem.h
@@ -0,0 +1,257 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCAMERA2HWI_MEM_H__
+#define __QCAMERA2HWI_MEM_H__
+
+#include <hardware/camera.h>
+#include <utils/Mutex.h>
+#include <utils/List.h>
+#include <qdMetaData.h>
+
+extern "C" {
+#include <sys/types.h>
+#include <linux/msm_ion.h>
+#include <mm_camera_interface.h>
+}
+
+namespace qcamera {
+
+class QCameraMemoryPool;
+
+// Base class for all memory types. Abstract.
+class QCameraMemory {
+
+public:
+    int cleanCache(uint32_t index)
+    {
+        return cacheOps(index, ION_IOC_CLEAN_CACHES);
+    }
+    int invalidateCache(uint32_t index)
+    {
+        return cacheOps(index, ION_IOC_INV_CACHES);
+    }
+    int cleanInvalidateCache(uint32_t index)
+    {
+        return cacheOps(index, ION_IOC_CLEAN_INV_CACHES);
+    }
+    int getFd(uint32_t index) const;
+    ssize_t getSize(uint32_t index) const;
+    uint8_t getCnt() const;
+
+    virtual int allocate(uint8_t count, size_t size, uint32_t is_secure) = 0;
+    virtual void deallocate() = 0;
+    virtual int allocateMore(uint8_t count, size_t size) = 0;
+    virtual int cacheOps(uint32_t index, unsigned int cmd) = 0;
+    virtual int getRegFlags(uint8_t *regFlags) const = 0;
+    virtual camera_memory_t *getMemory(uint32_t index,
+            bool metadata) const = 0;
+    virtual int getMatchBufIndex(const void *opaque, bool metadata) const = 0;
+    virtual void *getPtr(uint32_t index) const= 0;
+
+    QCameraMemory(bool cached,
+                  QCameraMemoryPool *pool = NULL,
+                  cam_stream_type_t streamType = CAM_STREAM_TYPE_DEFAULT,
+                  cam_stream_buf_type buf_Type = CAM_STREAM_BUF_TYPE_MPLANE);
+    virtual ~QCameraMemory();
+    virtual void reset();
+
+    void getBufDef(const cam_frame_len_offset_t &offset,
+            mm_camera_buf_def_t &bufDef, uint32_t index) const;
+
+    int32_t getUserBufDef(const cam_stream_user_buf_info_t &buf_info,
+            mm_camera_buf_def_t &bufDef, uint32_t index,
+            const cam_frame_len_offset_t &plane_offset,
+            mm_camera_buf_def_t *planebufDef, QCameraMemory *bufs) const;
+
+    void traceLogAllocStart(size_t size, int count, const char *allocName);
+    void traceLogAllocEnd(size_t size);
+
+protected:
+
+    friend class QCameraMemoryPool;
+
+    struct QCameraMemInfo {
+        int fd;
+        int main_ion_fd;
+        ion_user_handle_t handle;
+        size_t size;
+        bool cached;
+        unsigned int heap_id;
+    };
+
+    int alloc(int count, size_t size, unsigned int heap_id,
+            uint32_t is_secure);
+    void dealloc();
+    static int allocOneBuffer(struct QCameraMemInfo &memInfo,
+            unsigned int heap_id, size_t size, bool cached, uint32_t is_secure);
+    static void deallocOneBuffer(struct QCameraMemInfo &memInfo);
+    int cacheOpsInternal(uint32_t index, unsigned int cmd, void *vaddr);
+
+    bool m_bCached;
+    uint8_t mBufferCount;
+    struct QCameraMemInfo mMemInfo[MM_CAMERA_MAX_NUM_FRAMES];
+    QCameraMemoryPool *mMemoryPool;
+    cam_stream_type_t mStreamType;
+    cam_stream_buf_type mBufType;
+};
+
+class QCameraMemoryPool {
+
+public:
+
+    QCameraMemoryPool();
+    virtual ~QCameraMemoryPool();
+
+    int allocateBuffer(struct QCameraMemory::QCameraMemInfo &memInfo,
+            unsigned int heap_id, size_t size, bool cached,
+            cam_stream_type_t streamType, uint32_t is_secure);
+    void releaseBuffer(struct QCameraMemory::QCameraMemInfo &memInfo,
+            cam_stream_type_t streamType);
+    void clear();
+
+protected:
+
+    int findBufferLocked(struct QCameraMemory::QCameraMemInfo &memInfo,
+            unsigned int heap_id, size_t size, bool cached,
+            cam_stream_type_t streamType);
+
+    android::List<QCameraMemory::QCameraMemInfo> mPools[CAM_STREAM_TYPE_MAX];
+    pthread_mutex_t mLock;
+};
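+
+// Pool usage sketch, as it could look from inside a QCameraMemory subclass
+// (values are placeholders; QCameraMemInfo is protected, so outside callers
+// would not compile):
+//
+//   QCameraMemInfo info;
+//   if (pool.allocateBuffer(info, heapId, len, true,
+//                           CAM_STREAM_TYPE_PREVIEW, NON_SECURE) == NO_ERROR) {
+//       // use info.fd / info.size for the new stream buffer
+//       pool.releaseBuffer(info, CAM_STREAM_TYPE_PREVIEW);   // recycle
+//   }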
+
+// Internal heap memory, used for buffers consumed internally by the HAL.
+// Allocated from /dev/ion.
+class QCameraHeapMemory : public QCameraMemory {
+public:
+    QCameraHeapMemory(bool cached);
+    virtual ~QCameraHeapMemory();
+
+    virtual int allocate(uint8_t count, size_t size, uint32_t is_secure);
+    virtual int allocateMore(uint8_t count, size_t size);
+    virtual void deallocate();
+    virtual int cacheOps(uint32_t index, unsigned int cmd);
+    virtual int getRegFlags(uint8_t *regFlags) const;
+    virtual camera_memory_t *getMemory(uint32_t index, bool metadata) const;
+    virtual int getMatchBufIndex(const void *opaque, bool metadata) const;
+    virtual void *getPtr(uint32_t index) const;
+
+private:
+    void *mPtr[MM_CAMERA_MAX_NUM_FRAMES];
+};
+
+// External heap memory, used for buffers shared with the
+// framework. Allocated from /dev/ion or gralloc.
+class QCameraStreamMemory : public QCameraMemory {
+public:
+    QCameraStreamMemory(camera_request_memory getMemory,
+                        bool cached,
+                        QCameraMemoryPool *pool = NULL,
+                        cam_stream_type_t streamType = CAM_STREAM_TYPE_DEFAULT,
+                        cam_stream_buf_type buf_Type = CAM_STREAM_BUF_TYPE_MPLANE);
+    virtual ~QCameraStreamMemory();
+
+    virtual int allocate(uint8_t count, size_t size, uint32_t is_secure);
+    virtual int allocateMore(uint8_t count, size_t size);
+    virtual void deallocate();
+    virtual int cacheOps(uint32_t index, unsigned int cmd);
+    virtual int getRegFlags(uint8_t *regFlags) const;
+    virtual camera_memory_t *getMemory(uint32_t index, bool metadata) const;
+    virtual int getMatchBufIndex(const void *opaque, bool metadata) const;
+    virtual void *getPtr(uint32_t index) const;
+
+protected:
+    camera_request_memory mGetMemory;
+    camera_memory_t *mCameraMemory[MM_CAMERA_MAX_NUM_FRAMES];
+};
+
+// External heap memory, used for buffers shared with the
+// framework. Allocated from /dev/ion or gralloc.
+class QCameraVideoMemory : public QCameraStreamMemory {
+public:
+    QCameraVideoMemory(camera_request_memory getMemory, bool cached,
+            cam_stream_buf_type bufType = CAM_STREAM_BUF_TYPE_MPLANE);
+    virtual ~QCameraVideoMemory();
+
+    virtual int allocate(uint8_t count, size_t size, uint32_t is_secure);
+    virtual int allocateMore(uint8_t count, size_t size);
+    virtual void deallocate();
+    virtual camera_memory_t *getMemory(uint32_t index, bool metadata) const;
+    virtual int getMatchBufIndex(const void *opaque, bool metadata) const;
+    int allocateMeta(uint8_t buf_cnt);
+    void deallocateMeta();
+
+private:
+    camera_memory_t *mMetadata[MM_CAMERA_MAX_NUM_FRAMES];
+    uint8_t mMetaBufCount;
+};
+
+
+// Gralloc Memory is acquired from preview window
+class QCameraGrallocMemory : public QCameraMemory {
+    enum {
+        BUFFER_NOT_OWNED,
+        BUFFER_OWNED,
+    };
+public:
+    QCameraGrallocMemory(camera_request_memory getMemory);
+    void setNativeWindow(preview_stream_ops_t *anw);
+    virtual ~QCameraGrallocMemory();
+
+    virtual int allocate(uint8_t count, size_t size, uint32_t is_secure);
+    virtual int allocateMore(uint8_t count, size_t size);
+    virtual void deallocate();
+    virtual int cacheOps(uint32_t index, unsigned int cmd);
+    virtual int getRegFlags(uint8_t *regFlags) const;
+    virtual camera_memory_t *getMemory(uint32_t index, bool metadata) const;
+    virtual int getMatchBufIndex(const void *opaque, bool metadata) const;
+    virtual void *getPtr(uint32_t index) const;
+
+    void setWindowInfo(preview_stream_ops_t *window, int width, int height,
+        int stride, int scanline, int format);
+    // Enqueue/display buffer[index] onto the native window,
+    // and dequeue one buffer from it.
+    // Returns the buffer index of the dequeued buffer.
+    int displayBuffer(uint32_t index);
+
+private:
+    buffer_handle_t *mBufferHandle[MM_CAMERA_MAX_NUM_FRAMES];
+    int mLocalFlag[MM_CAMERA_MAX_NUM_FRAMES];
+    struct private_handle_t *mPrivateHandle[MM_CAMERA_MAX_NUM_FRAMES];
+    preview_stream_ops_t *mWindow;
+    int mWidth, mHeight, mFormat, mStride, mScanline;
+    camera_request_memory mGetMemory;
+    camera_memory_t *mCameraMemory[MM_CAMERA_MAX_NUM_FRAMES];
+    int mMinUndequeuedBuffers;
+    enum ColorSpace_t mColorSpace;
+};
+
+}; // namespace qcamera
+
+#endif /* __QCAMERA2HWI_MEM_H__ */
diff --git a/camera/QCamera2/HAL/QCameraParameters.cpp b/camera/QCamera2/HAL/QCameraParameters.cpp
new file mode 100644
index 0000000..4d93682
--- /dev/null
+++ b/camera/QCamera2/HAL/QCameraParameters.cpp
@@ -0,0 +1,12126 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define LOG_TAG "QCameraParameters"
+
+#include <cutils/properties.h>
+#include <math.h>
+#include <utils/Errors.h>
+#include <utils/Log.h>
+#include <string.h>
+#include <stdlib.h>
+#include <gralloc_priv.h>
+#include <sys/sysinfo.h>
+#include "QCamera2HWI.h"
+#include "QCameraParameters.h"
+
+#define ASPECT_TOLERANCE 0.001
+
+namespace qcamera {
+// Parameter keys to communicate between camera application and driver.
+const char QCameraParameters::KEY_QC_SUPPORTED_HFR_SIZES[] = "hfr-size-values";
+const char QCameraParameters::KEY_QC_PREVIEW_FRAME_RATE_MODE[] = "preview-frame-rate-mode";
+const char QCameraParameters::KEY_QC_SUPPORTED_PREVIEW_FRAME_RATE_MODES[] = "preview-frame-rate-modes";
+const char QCameraParameters::KEY_QC_PREVIEW_FRAME_RATE_AUTO_MODE[] = "frame-rate-auto";
+const char QCameraParameters::KEY_QC_PREVIEW_FRAME_RATE_FIXED_MODE[] = "frame-rate-fixed";
+const char QCameraParameters::KEY_QC_TOUCH_AF_AEC[] = "touch-af-aec";
+const char QCameraParameters::KEY_QC_SUPPORTED_TOUCH_AF_AEC[] = "touch-af-aec-values";
+const char QCameraParameters::KEY_QC_TOUCH_INDEX_AEC[] = "touch-index-aec";
+const char QCameraParameters::KEY_QC_TOUCH_INDEX_AF[] = "touch-index-af";
+const char QCameraParameters::KEY_QC_SCENE_DETECT[] = "scene-detect";
+const char QCameraParameters::KEY_QC_SUPPORTED_SCENE_DETECT[] = "scene-detect-values";
+const char QCameraParameters::KEY_QC_ISO_MODE[] = "iso";
+const char QCameraParameters::KEY_QC_CONTINUOUS_ISO[] = "continuous-iso";
+const char QCameraParameters::KEY_QC_MIN_ISO[] = "min-iso";
+const char QCameraParameters::KEY_QC_MAX_ISO[] = "max-iso";
+const char QCameraParameters::KEY_QC_SUPPORTED_ISO_MODES[] = "iso-values";
+const char QCameraParameters::KEY_QC_EXPOSURE_TIME[] = "exposure-time";
+const char QCameraParameters::KEY_QC_MIN_EXPOSURE_TIME[] = "min-exposure-time";
+const char QCameraParameters::KEY_QC_MAX_EXPOSURE_TIME[] = "max-exposure-time";
+const char QCameraParameters::KEY_QC_CURRENT_EXPOSURE_TIME[] = "cur-exposure-time";
+const char QCameraParameters::KEY_QC_CURRENT_ISO[] = "cur-iso";
+const char QCameraParameters::KEY_QC_LENSSHADE[] = "lensshade";
+const char QCameraParameters::KEY_QC_SUPPORTED_LENSSHADE_MODES[] = "lensshade-values";
+const char QCameraParameters::KEY_QC_AUTO_EXPOSURE[] = "auto-exposure";
+const char QCameraParameters::KEY_QC_SUPPORTED_AUTO_EXPOSURE[] = "auto-exposure-values";
+const char QCameraParameters::KEY_QC_DENOISE[] = "denoise";
+const char QCameraParameters::KEY_QC_SUPPORTED_DENOISE[] = "denoise-values";
+const char QCameraParameters::KEY_QC_FOCUS_ALGO[] = "selectable-zone-af";
+const char QCameraParameters::KEY_QC_SUPPORTED_FOCUS_ALGOS[] = "selectable-zone-af-values";
+const char QCameraParameters::KEY_QC_MANUAL_FOCUS_POSITION[] = "manual-focus-position";
+const char QCameraParameters::KEY_QC_MANUAL_FOCUS_POS_TYPE[] = "manual-focus-pos-type";
+const char QCameraParameters::KEY_QC_MIN_FOCUS_POS_INDEX[] = "min-focus-pos-index";
+const char QCameraParameters::KEY_QC_MAX_FOCUS_POS_INDEX[] = "max-focus-pos-index";
+const char QCameraParameters::KEY_QC_MIN_FOCUS_POS_DAC[] = "min-focus-pos-dac";
+const char QCameraParameters::KEY_QC_MAX_FOCUS_POS_DAC[] = "max-focus-pos-dac";
+const char QCameraParameters::KEY_QC_MIN_FOCUS_POS_RATIO[] = "min-focus-pos-ratio";
+const char QCameraParameters::KEY_QC_MAX_FOCUS_POS_RATIO[] = "max-focus-pos-ratio";
+const char QCameraParameters::KEY_QC_FOCUS_POSITION_SCALE[] = "cur-focus-scale";
+const char QCameraParameters::KEY_QC_MIN_FOCUS_POS_DIOPTER[] = "min-focus-pos-diopter";
+const char QCameraParameters::KEY_QC_MAX_FOCUS_POS_DIOPTER[] = "max-focus-pos-diopter";
+const char QCameraParameters::KEY_QC_FOCUS_POSITION_DIOPTER[] = "cur-focus-diopter";
+const char QCameraParameters::KEY_QC_FACE_DETECTION[] = "face-detection";
+const char QCameraParameters::KEY_QC_SUPPORTED_FACE_DETECTION[] = "face-detection-values";
+const char QCameraParameters::KEY_QC_FACE_RECOGNITION[] = "face-recognition";
+const char QCameraParameters::KEY_QC_SUPPORTED_FACE_RECOGNITION[] = "face-recognition-values";
+const char QCameraParameters::KEY_QC_MEMORY_COLOR_ENHANCEMENT[] = "mce";
+const char QCameraParameters::KEY_QC_SUPPORTED_MEM_COLOR_ENHANCE_MODES[] = "mce-values";
+const char QCameraParameters::KEY_QC_DIS[] = "dis";
+const char QCameraParameters::KEY_QC_OIS[] = "ois";
+const char QCameraParameters::KEY_QC_SUPPORTED_DIS_MODES[] = "dis-values";
+const char QCameraParameters::KEY_QC_SUPPORTED_OIS_MODES[] = "ois-values";
+const char QCameraParameters::KEY_QC_VIDEO_HIGH_FRAME_RATE[] = "video-hfr";
+const char QCameraParameters::KEY_QC_VIDEO_HIGH_SPEED_RECORDING[] = "video-hsr";
+const char QCameraParameters::KEY_QC_SUPPORTED_VIDEO_HIGH_FRAME_RATE_MODES[] = "video-hfr-values";
+const char QCameraParameters::KEY_QC_REDEYE_REDUCTION[] = "redeye-reduction";
+const char QCameraParameters::KEY_QC_SUPPORTED_REDEYE_REDUCTION[] = "redeye-reduction-values";
+const char QCameraParameters::KEY_QC_HIGH_DYNAMIC_RANGE_IMAGING[] = "hdr";
+const char QCameraParameters::KEY_QC_SUPPORTED_HDR_IMAGING_MODES[] = "hdr-values";
+const char QCameraParameters::KEY_QC_ZSL[] = "zsl";
+const char QCameraParameters::KEY_QC_SUPPORTED_ZSL_MODES[] = "zsl-values";
+const char QCameraParameters::KEY_QC_ZSL_BURST_INTERVAL[] = "capture-burst-interval";
+const char QCameraParameters::KEY_QC_ZSL_BURST_LOOKBACK[] = "capture-burst-retroactive";
+const char QCameraParameters::KEY_QC_ZSL_QUEUE_DEPTH[] = "capture-burst-queue-depth";
+const char QCameraParameters::KEY_QC_CAMERA_MODE[] = "camera-mode";
+const char QCameraParameters::KEY_QC_AE_BRACKET_HDR[] = "ae-bracket-hdr";
+const char QCameraParameters::KEY_QC_SUPPORTED_AE_BRACKET_MODES[] = "ae-bracket-hdr-values";
+const char QCameraParameters::KEY_QC_SUPPORTED_RAW_FORMATS[] = "raw-format-values";
+const char QCameraParameters::KEY_QC_RAW_FORMAT[] = "raw-format";
+const char QCameraParameters::KEY_QC_ORIENTATION[] = "orientation";
+const char QCameraParameters::KEY_QC_SELECTABLE_ZONE_AF[] = "selectable-zone-af";
+const char QCameraParameters::KEY_QC_CAPTURE_BURST_EXPOSURE[] = "capture-burst-exposures";
+const char QCameraParameters::KEY_QC_NUM_SNAPSHOT_PER_SHUTTER[] = "num-snaps-per-shutter";
+const char QCameraParameters::KEY_QC_NUM_RETRO_BURST_PER_SHUTTER[] = "num-retro-burst-per-shutter";
+const char QCameraParameters::KEY_QC_SNAPSHOT_BURST_LED_ON_PERIOD[] = "zsl-burst-led-on-period";
+const char QCameraParameters::KEY_QC_NO_DISPLAY_MODE[] = "no-display-mode";
+const char QCameraParameters::KEY_QC_RAW_PICUTRE_SIZE[] = "raw-size";
+const char QCameraParameters::KEY_QC_SUPPORTED_SKIN_TONE_ENHANCEMENT_MODES[] = "skinToneEnhancement-values";
+const char QCameraParameters::KEY_QC_SUPPORTED_LIVESNAPSHOT_SIZES[] = "supported-live-snapshot-sizes";
+const char QCameraParameters::KEY_QC_SUPPORTED_HDR_NEED_1X[] = "hdr-need-1x-values";
+const char QCameraParameters::KEY_QC_HDR_NEED_1X[] = "hdr-need-1x";
+const char QCameraParameters::KEY_QC_PREVIEW_FLIP[] = "preview-flip";
+const char QCameraParameters::KEY_QC_VIDEO_FLIP[] = "video-flip";
+const char QCameraParameters::KEY_QC_SNAPSHOT_PICTURE_FLIP[] = "snapshot-picture-flip";
+const char QCameraParameters::KEY_QC_SUPPORTED_FLIP_MODES[] = "flip-mode-values";
+const char QCameraParameters::KEY_QC_VIDEO_HDR[] = "video-hdr";
+const char QCameraParameters::KEY_QC_SENSOR_HDR[] = "sensor-hdr";
+const char QCameraParameters::KEY_QC_VT_ENABLE[] = "avtimer";
+const char QCameraParameters::KEY_QC_SUPPORTED_VIDEO_HDR_MODES[] = "video-hdr-values";
+const char QCameraParameters::KEY_QC_SUPPORTED_SENSOR_HDR_MODES[] = "sensor-hdr-values";
+const char QCameraParameters::KEY_QC_AUTO_HDR_ENABLE [] = "auto-hdr-enable";
+const char QCameraParameters::KEY_QC_SNAPSHOT_BURST_NUM[] = "snapshot-burst-num";
+const char QCameraParameters::KEY_QC_SNAPSHOT_FD_DATA[] = "snapshot-fd-data-enable";
+const char QCameraParameters::KEY_QC_TINTLESS_ENABLE[] = "tintless";
+const char QCameraParameters::KEY_QC_SCENE_SELECTION[] = "scene-selection";
+const char QCameraParameters::KEY_QC_CDS_MODE[] = "cds-mode";
+const char QCameraParameters::KEY_QC_VIDEO_CDS_MODE[] = "video-cds-mode";
+const char QCameraParameters::KEY_QC_SUPPORTED_CDS_MODES[] = "cds-mode-values";
+const char QCameraParameters::KEY_QC_SUPPORTED_VIDEO_CDS_MODES[] = "video-cds-mode-values";
+const char QCameraParameters::KEY_QC_TNR_MODE[] = "tnr-mode";
+const char QCameraParameters::KEY_QC_VIDEO_TNR_MODE[] = "video-tnr-mode";
+const char QCameraParameters::KEY_QC_SUPPORTED_TNR_MODES[] = "tnr-mode-values";
+const char QCameraParameters::KEY_QC_SUPPORTED_VIDEO_TNR_MODES[] = "video-tnr-mode-values";
+const char QCameraParameters::KEY_QC_VIDEO_ROTATION[] = "video-rotation";
+const char QCameraParameters::KEY_QC_SUPPORTED_VIDEO_ROTATION_VALUES[] = "video-rotation-values";
+const char QCameraParameters::KEY_QC_AF_BRACKET[] = "af-bracket";
+const char QCameraParameters::KEY_QC_SUPPORTED_AF_BRACKET_MODES[] = "af-bracket-values";
+const char QCameraParameters::KEY_QC_RE_FOCUS[] = "re-focus";
+const char QCameraParameters::KEY_QC_SUPPORTED_RE_FOCUS_MODES[] = "re-focus-values";
+const char QCameraParameters::KEY_QC_CHROMA_FLASH[] = "chroma-flash";
+const char QCameraParameters::KEY_QC_SUPPORTED_CHROMA_FLASH_MODES[] = "chroma-flash-values";
+const char QCameraParameters::KEY_QC_OPTI_ZOOM[] = "opti-zoom";
+const char QCameraParameters::KEY_QC_SEE_MORE[] = "see-more";
+const char QCameraParameters::KEY_QC_STILL_MORE[] = "still-more";
+const char QCameraParameters::KEY_QC_SUPPORTED_OPTI_ZOOM_MODES[] = "opti-zoom-values";
+const char QCameraParameters::KEY_QC_HDR_MODE[] = "hdr-mode";
+const char QCameraParameters::KEY_QC_SUPPORTED_KEY_QC_HDR_MODES[] = "hdr-mode-values";
+const char QCameraParameters::KEY_QC_TRUE_PORTRAIT[] = "true-portrait";
+const char QCameraParameters::KEY_QC_SUPPORTED_TRUE_PORTRAIT_MODES[] = "true-portrait-values";
+const char QCameraParameters::KEY_QC_SUPPORTED_SEE_MORE_MODES[] = "see-more-values";
+const char QCameraParameters::KEY_QC_SUPPORTED_STILL_MORE_MODES[] = "still-more-values";
+const char QCameraParameters::KEY_INTERNAL_PERVIEW_RESTART[] = "internal-restart";
+const char QCameraParameters::KEY_QC_RDI_MODE[] = "rdi-mode";
+const char QCameraParameters::KEY_QC_SUPPORTED_RDI_MODES[] = "rdi-mode-values";
+const char QCameraParameters::KEY_QC_SECURE_MODE[] = "secure-mode";
+const char QCameraParameters::KEY_QC_SUPPORTED_SECURE_MODES[] = "secure-mode-values";
+const char QCameraParameters::ISO_HJR[] = "ISO_HJR";
+const char QCameraParameters::KEY_QC_AUTO_HDR_SUPPORTED[] = "auto-hdr-supported";
+const char QCameraParameters::KEY_QC_LONGSHOT_SUPPORTED[] = "longshot-supported";
+const char QCameraParameters::KEY_QC_ZSL_HDR_SUPPORTED[] = "zsl-hdr-supported";
+const char QCameraParameters::KEY_QC_WB_MANUAL_CCT[] = "wb-manual-cct";
+const char QCameraParameters::KEY_QC_MIN_WB_CCT[] = "min-wb-cct";
+const char QCameraParameters::KEY_QC_MAX_WB_CCT[] = "max-wb-cct";
+
+const char QCameraParameters::KEY_QC_MANUAL_WB_GAINS[] = "manual-wb-gains";
+const char QCameraParameters::KEY_QC_MIN_WB_GAIN[] = "min-wb-gain";
+const char QCameraParameters::KEY_QC_MAX_WB_GAIN[] = "max-wb-gain";
+
+const char QCameraParameters::KEY_QC_MANUAL_WB_TYPE[] = "manual-wb-type";
+const char QCameraParameters::KEY_QC_MANUAL_WB_VALUE[] = "manual-wb-value";
+
+const char QCameraParameters::WHITE_BALANCE_MANUAL[] = "manual";
+const char QCameraParameters::FOCUS_MODE_MANUAL_POSITION[] = "manual";
+
+
+// Values for effect settings.
+const char QCameraParameters::EFFECT_EMBOSS[] = "emboss";
+const char QCameraParameters::EFFECT_SKETCH[] = "sketch";
+const char QCameraParameters::EFFECT_NEON[] = "neon";
+
+// Values for auto exposure settings.
+const char QCameraParameters::TOUCH_AF_AEC_OFF[] = "touch-off";
+const char QCameraParameters::TOUCH_AF_AEC_ON[] = "touch-on";
+
+// Values for scene mode settings.
+const char QCameraParameters::SCENE_MODE_ASD[] = "asd";   // corresponds to CAMERA_BESTSHOT_AUTO in HAL
+const char QCameraParameters::SCENE_MODE_BACKLIGHT[] = "backlight";
+const char QCameraParameters::SCENE_MODE_FLOWERS[] = "flowers";
+const char QCameraParameters::SCENE_MODE_AR[] = "AR";
+const char QCameraParameters::SCENE_MODE_HDR[] = "hdr";
+
+// Formats for setPreviewFormat and setPictureFormat.
+const char QCameraParameters::PIXEL_FORMAT_YUV420SP_ADRENO[] = "yuv420sp-adreno";
+const char QCameraParameters::PIXEL_FORMAT_YV12[] = "yuv420p";
+const char QCameraParameters::PIXEL_FORMAT_NV12[] = "nv12";
+const char QCameraParameters::QC_PIXEL_FORMAT_NV12_VENUS[] = "nv12-venus";
+
+// Values for raw image formats
+const char QCameraParameters::QC_PIXEL_FORMAT_YUV_RAW_8BIT_YUYV[] = "yuv-raw8-yuyv";
+const char QCameraParameters::QC_PIXEL_FORMAT_YUV_RAW_8BIT_YVYU[] = "yuv-raw8-yvyu";
+const char QCameraParameters::QC_PIXEL_FORMAT_YUV_RAW_8BIT_UYVY[] = "yuv-raw8-uyvy";
+const char QCameraParameters::QC_PIXEL_FORMAT_YUV_RAW_8BIT_VYUY[] = "yuv-raw8-vyuy";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_QCOM_RAW_8GBRG[] = "bayer-qcom-8gbrg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_QCOM_RAW_8GRBG[] = "bayer-qcom-8grbg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_QCOM_RAW_8RGGB[] = "bayer-qcom-8rggb";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_QCOM_RAW_8BGGR[] = "bayer-qcom-8bggr";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_QCOM_RAW_10GBRG[] = "bayer-qcom-10gbrg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_QCOM_RAW_10GRBG[] = "bayer-qcom-10grbg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_QCOM_RAW_10RGGB[] = "bayer-qcom-10rggb";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_QCOM_RAW_10BGGR[] = "bayer-qcom-10bggr";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_QCOM_RAW_12GBRG[] = "bayer-qcom-12gbrg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_QCOM_RAW_12GRBG[] = "bayer-qcom-12grbg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_QCOM_RAW_12RGGB[] = "bayer-qcom-12rggb";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_QCOM_RAW_12BGGR[] = "bayer-qcom-12bggr";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_MIPI_RAW_8GBRG[] = "bayer-mipi-8gbrg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_MIPI_RAW_8GRBG[] = "bayer-mipi-8grbg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_MIPI_RAW_8RGGB[] = "bayer-mipi-8rggb";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_MIPI_RAW_8BGGR[] = "bayer-mipi-8bggr";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_MIPI_RAW_10GBRG[] = "bayer-mipi-10gbrg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_MIPI_RAW_10GRBG[] = "bayer-mipi-10grbg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_MIPI_RAW_10RGGB[] = "bayer-mipi-10rggb";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_MIPI_RAW_10BGGR[] = "bayer-mipi-10bggr";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_MIPI_RAW_12GBRG[] = "bayer-mipi-12gbrg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_MIPI_RAW_12GRBG[] = "bayer-mipi-12grbg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_MIPI_RAW_12RGGB[] = "bayer-mipi-12rggb";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_MIPI_RAW_12BGGR[] = "bayer-mipi-12bggr";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_8GBRG[] = "bayer-ideal-qcom-8gbrg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_8GRBG[] = "bayer-ideal-qcom-8grbg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_8RGGB[] = "bayer-ideal-qcom-8rggb";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_8BGGR[] = "bayer-ideal-qcom-8bggr";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_10GBRG[] = "bayer-ideal-qcom-10gbrg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_10GRBG[] = "bayer-ideal-qcom-10grbg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_10RGGB[] = "bayer-ideal-qcom-10rggb";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_10BGGR[] = "bayer-ideal-qcom-10bggr";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_12GBRG[] = "bayer-ideal-qcom-12gbrg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_12GRBG[] = "bayer-ideal-qcom-12grbg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_12RGGB[] = "bayer-ideal-qcom-12rggb";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_12BGGR[] = "bayer-ideal-qcom-12bggr";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_8GBRG[] = "bayer-ideal-mipi-8gbrg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_8GRBG[] = "bayer-ideal-mipi-8grbg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_8RGGB[] = "bayer-ideal-mipi-8rggb";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_8BGGR[] = "bayer-ideal-mipi-8bggr";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_10GBRG[] = "bayer-ideal-mipi-10gbrg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_10GRBG[] = "bayer-ideal-mipi-10grbg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_10RGGB[] = "bayer-ideal-mipi-10rggb";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_10BGGR[] = "bayer-ideal-mipi-10bggr";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_12GBRG[] = "bayer-ideal-mipi-12gbrg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_12GRBG[] = "bayer-ideal-mipi-12grbg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_12RGGB[] = "bayer-ideal-mipi-12rggb";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_12BGGR[] = "bayer-ideal-mipi-12bggr";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN8_8GBRG[] = "bayer-ideal-plain8-8gbrg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN8_8GRBG[] = "bayer-ideal-plain8-8grbg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN8_8RGGB[] = "bayer-ideal-plain8-8rggb";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN8_8BGGR[] = "bayer-ideal-plain8-8bggr";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_8GBRG[] = "bayer-ideal-plain16-8gbrg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_8GRBG[] = "bayer-ideal-plain16-8grbg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_8RGGB[] = "bayer-ideal-plain16-8rggb";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_8BGGR[] = "bayer-ideal-plain16-8bggr";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_10GBRG[] = "bayer-ideal-plain16-10gbrg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_10GRBG[] = "bayer-ideal-plain16-10grbg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_10RGGB[] = "bayer-ideal-plain16-10rggb";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_10BGGR[] = "bayer-ideal-plain16-10bggr";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_12GBRG[] = "bayer-ideal-plain16-12gbrg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_12GRBG[] = "bayer-ideal-plain16-12grbg";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_12RGGB[] = "bayer-ideal-plain16-12rggb";
+const char QCameraParameters::QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_12BGGR[] = "bayer-ideal-plain16-12bggr";
+
+// Values for ISO Settings
+const char QCameraParameters::ISO_AUTO[] = "auto";
+const char QCameraParameters::ISO_100[] = "ISO100";
+const char QCameraParameters::ISO_200[] = "ISO200";
+const char QCameraParameters::ISO_400[] = "ISO400";
+const char QCameraParameters::ISO_800[] = "ISO800";
+const char QCameraParameters::ISO_1600[] = "ISO1600";
+const char QCameraParameters::ISO_3200[] = "ISO3200";
+const char QCameraParameters::ISO_MANUAL[] = "manual";
+
+
+// Values for auto exposure settings.
+const char QCameraParameters::AUTO_EXPOSURE_FRAME_AVG[] = "frame-average";
+const char QCameraParameters::AUTO_EXPOSURE_CENTER_WEIGHTED[] = "center-weighted";
+const char QCameraParameters::AUTO_EXPOSURE_SPOT_METERING[] = "spot-metering";
+const char QCameraParameters::AUTO_EXPOSURE_SMART_METERING[] = "smart-metering";
+const char QCameraParameters::AUTO_EXPOSURE_USER_METERING[] = "user-metering";
+const char QCameraParameters::AUTO_EXPOSURE_SPOT_METERING_ADV[] = "spot-metering-adv";
+const char QCameraParameters::AUTO_EXPOSURE_CENTER_WEIGHTED_ADV[] = "center-weighted-adv";
+
+const char QCameraParameters::KEY_QC_GPS_LATITUDE_REF[] = "gps-latitude-ref";
+const char QCameraParameters::KEY_QC_GPS_LONGITUDE_REF[] = "gps-longitude-ref";
+const char QCameraParameters::KEY_QC_GPS_ALTITUDE_REF[] = "gps-altitude-ref";
+const char QCameraParameters::KEY_QC_GPS_STATUS[] = "gps-status";
+
+const char QCameraParameters::KEY_QC_HISTOGRAM[] = "histogram";
+const char QCameraParameters::KEY_QC_SUPPORTED_HISTOGRAM_MODES[] = "histogram-values";
+
+const char QCameraParameters::VALUE_ENABLE[] = "enable";
+const char QCameraParameters::VALUE_DISABLE[] = "disable";
+const char QCameraParameters::VALUE_OFF[] = "off";
+const char QCameraParameters::VALUE_ON[] = "on";
+const char QCameraParameters::VALUE_TRUE[] = "true";
+const char QCameraParameters::VALUE_FALSE[] = "false";
+
+const char QCameraParameters::KEY_QC_SHARPNESS[] = "sharpness";
+const char QCameraParameters::KEY_QC_MIN_SHARPNESS[] = "min-sharpness";
+const char QCameraParameters::KEY_QC_MAX_SHARPNESS[] = "max-sharpness";
+const char QCameraParameters::KEY_QC_SHARPNESS_STEP[] = "sharpness-step";
+const char QCameraParameters::KEY_QC_CONTRAST[] = "contrast";
+const char QCameraParameters::KEY_QC_MIN_CONTRAST[] = "min-contrast";
+const char QCameraParameters::KEY_QC_MAX_CONTRAST[] = "max-contrast";
+const char QCameraParameters::KEY_QC_CONTRAST_STEP[] = "contrast-step";
+const char QCameraParameters::KEY_QC_SATURATION[] = "saturation";
+const char QCameraParameters::KEY_QC_MIN_SATURATION[] = "min-saturation";
+const char QCameraParameters::KEY_QC_MAX_SATURATION[] = "max-saturation";
+const char QCameraParameters::KEY_QC_SATURATION_STEP[] = "saturation-step";
+const char QCameraParameters::KEY_QC_BRIGHTNESS[] = "luma-adaptation";
+const char QCameraParameters::KEY_QC_MIN_BRIGHTNESS[] = "min-brightness";
+const char QCameraParameters::KEY_QC_MAX_BRIGHTNESS[] = "max-brightness";
+const char QCameraParameters::KEY_QC_BRIGHTNESS_STEP[] = "brightness-step";
+const char QCameraParameters::KEY_QC_SCE_FACTOR[] = "skinToneEnhancement";
+const char QCameraParameters::KEY_QC_MIN_SCE_FACTOR[] = "min-sce-factor";
+const char QCameraParameters::KEY_QC_MAX_SCE_FACTOR[] = "max-sce-factor";
+const char QCameraParameters::KEY_QC_SCE_FACTOR_STEP[] = "sce-factor-step";
+
+const char QCameraParameters::KEY_QC_SUPPORTED_CAMERA_FEATURES[] = "qc-camera-features";
+const char QCameraParameters::KEY_QC_MAX_NUM_REQUESTED_FACES[] = "qc-max-num-requested-faces";
+
+//Values for DENOISE
+const char QCameraParameters::DENOISE_OFF[] = "denoise-off";
+const char QCameraParameters::DENOISE_ON[] = "denoise-on";
+
+// Values for selectable zone af Settings
+const char QCameraParameters::FOCUS_ALGO_AUTO[] = "auto";
+const char QCameraParameters::FOCUS_ALGO_SPOT_METERING[] = "spot-metering";
+const char QCameraParameters::FOCUS_ALGO_CENTER_WEIGHTED[] = "center-weighted";
+const char QCameraParameters::FOCUS_ALGO_FRAME_AVERAGE[] = "frame-average";
+
+// Values for HFR settings.
+const char QCameraParameters::VIDEO_HFR_OFF[] = "off";
+const char QCameraParameters::VIDEO_HFR_2X[] = "60";
+const char QCameraParameters::VIDEO_HFR_3X[] = "90";
+const char QCameraParameters::VIDEO_HFR_4X[] = "120";
+const char QCameraParameters::VIDEO_HFR_5X[] = "150";
+const char QCameraParameters::VIDEO_HFR_6X[] = "180";
+const char QCameraParameters::VIDEO_HFR_7X[] = "210";
+const char QCameraParameters::VIDEO_HFR_8X[] = "240";
+const char QCameraParameters::VIDEO_HFR_9X[] = "480";
+
+// Values for HDR Bracketing settings.
+const char QCameraParameters::AE_BRACKET_OFF[] = "Off";
+const char QCameraParameters::AE_BRACKET[] = "AE-Bracket";
+
+// Values for AF Bracketing setting.
+const char QCameraParameters::AF_BRACKET_OFF[] = "af-bracket-off";
+const char QCameraParameters::AF_BRACKET_ON[] = "af-bracket-on";
+
+// Values for Refocus setting.
+const char QCameraParameters::RE_FOCUS_OFF[] = "re-focus-off";
+const char QCameraParameters::RE_FOCUS_ON[] = "re-focus-on";
+
+// Values for Chroma Flash setting.
+const char QCameraParameters::CHROMA_FLASH_OFF[] = "chroma-flash-off";
+const char QCameraParameters::CHROMA_FLASH_ON[] = "chroma-flash-on";
+
+// Values for Opti Zoom setting.
+const char QCameraParameters::OPTI_ZOOM_OFF[] = "opti-zoom-off";
+const char QCameraParameters::OPTI_ZOOM_ON[] = "opti-zoom-on";
+
+// Values for Still More setting.
+const char QCameraParameters::STILL_MORE_OFF[] = "still-more-off";
+const char QCameraParameters::STILL_MORE_ON[] = "still-more-on";
+
+// Values for HDR mode setting.
+const char QCameraParameters::HDR_MODE_SENSOR[] = "hdr-mode-sensor";
+const char QCameraParameters::HDR_MODE_MULTI_FRAME[] = "hdr-mode-multiframe";
+
+// Values for True Portrait setting.
+const char QCameraParameters::TRUE_PORTRAIT_OFF[] = "true-portrait-off";
+const char QCameraParameters::TRUE_PORTRAIT_ON[] = "true-portrait-on";
+
+// Values for FLIP settings.
+const char QCameraParameters::FLIP_MODE_OFF[] = "off";
+const char QCameraParameters::FLIP_MODE_V[] = "flip-v";
+const char QCameraParameters::FLIP_MODE_H[] = "flip-h";
+const char QCameraParameters::FLIP_MODE_VH[] = "flip-vh";
+
+const char QCameraParameters::CDS_MODE_OFF[] = "off";
+const char QCameraParameters::CDS_MODE_ON[] = "on";
+const char QCameraParameters::CDS_MODE_AUTO[] = "auto";
+
+const char QCameraParameters::KEY_SELECTED_AUTO_SCENE[] = "selected-auto-scene";
+
+// Values for video rotation settings.
+const char QCameraParameters::VIDEO_ROTATION_0[] = "0";
+const char QCameraParameters::VIDEO_ROTATION_90[] = "90";
+const char QCameraParameters::VIDEO_ROTATION_180[] = "180";
+const char QCameraParameters::VIDEO_ROTATION_270[] = "270";
+
+const char QCameraParameters::KEY_QC_SUPPORTED_MANUAL_FOCUS_MODES[] = "manual-focus-modes";
+const char QCameraParameters::KEY_QC_SUPPORTED_MANUAL_EXPOSURE_MODES[] = "manual-exposure-modes";
+const char QCameraParameters::KEY_QC_SUPPORTED_MANUAL_WB_MODES[] = "manual-wb-modes";
+const char QCameraParameters::KEY_QC_FOCUS_SCALE_MODE[] = "scale-mode";
+const char QCameraParameters::KEY_QC_FOCUS_DIOPTER_MODE[] = "diopter-mode";
+const char QCameraParameters::KEY_QC_ISO_PRIORITY[] = "iso-priority";
+const char QCameraParameters::KEY_QC_EXP_TIME_PRIORITY[] = "exp-time-priority";
+const char QCameraParameters::KEY_QC_USER_SETTING[] = "user-setting";
+const char QCameraParameters::KEY_QC_WB_CCT_MODE[] = "color-temperature";
+const char QCameraParameters::KEY_QC_WB_GAIN_MODE[] = "rbgb-gains";
+
+static const char* portrait = "portrait";
+static const char* landscape = "landscape";
+
+const cam_dimension_t QCameraParameters::THUMBNAIL_SIZES_MAP[] = {
+    { 512, 288 }, //1.777778
+    { 480, 288 }, //1.666667
+    { 256, 154 }, //1.66233
+    { 432, 288 }, //1.5
+    { 320, 320 }, //1.0
+    { 320, 240 }, //1.33333
+    { 176, 144 }, //1.222222
+    { 0, 0 }      // required by Android SDK
+};
+
+const QCameraParameters::QCameraMap<cam_auto_exposure_mode_type>
+        QCameraParameters::AUTO_EXPOSURE_MAP[] = {
+    { AUTO_EXPOSURE_FRAME_AVG,           CAM_AEC_MODE_FRAME_AVERAGE },
+    { AUTO_EXPOSURE_CENTER_WEIGHTED,     CAM_AEC_MODE_CENTER_WEIGHTED },
+    { AUTO_EXPOSURE_SPOT_METERING,       CAM_AEC_MODE_SPOT_METERING },
+    { AUTO_EXPOSURE_SMART_METERING,      CAM_AEC_MODE_SMART_METERING },
+    { AUTO_EXPOSURE_USER_METERING,       CAM_AEC_MODE_USER_METERING },
+    { AUTO_EXPOSURE_SPOT_METERING_ADV,   CAM_AEC_MODE_SPOT_METERING_ADV },
+    { AUTO_EXPOSURE_CENTER_WEIGHTED_ADV, CAM_AEC_MODE_CENTER_WEIGHTED_ADV },
+};
+
+const QCameraParameters::QCameraMap<cam_format_t>
+        QCameraParameters::PREVIEW_FORMATS_MAP[] = {
+    {PIXEL_FORMAT_YUV420SP,        CAM_FORMAT_YUV_420_NV21},
+    {PIXEL_FORMAT_YUV420P,         CAM_FORMAT_YUV_420_YV12},
+    {PIXEL_FORMAT_YUV420SP_ADRENO, CAM_FORMAT_YUV_420_NV21_ADRENO},
+    {PIXEL_FORMAT_YV12,            CAM_FORMAT_YUV_420_YV12},
+    {PIXEL_FORMAT_NV12,            CAM_FORMAT_YUV_420_NV12},
+    {QC_PIXEL_FORMAT_NV12_VENUS,   CAM_FORMAT_YUV_420_NV12_VENUS}
+};
+
+const QCameraParameters::QCameraMap<cam_format_t>
+        QCameraParameters::PICTURE_TYPES_MAP[] = {
+    {PIXEL_FORMAT_JPEG,                          CAM_FORMAT_JPEG},
+    {PIXEL_FORMAT_YUV420SP,                      CAM_FORMAT_YUV_420_NV21},
+    {PIXEL_FORMAT_YUV422SP,                      CAM_FORMAT_YUV_422_NV16},
+    {QC_PIXEL_FORMAT_YUV_RAW_8BIT_YUYV,          CAM_FORMAT_YUV_RAW_8BIT_YUYV},
+    {QC_PIXEL_FORMAT_YUV_RAW_8BIT_YVYU,          CAM_FORMAT_YUV_RAW_8BIT_YVYU},
+    {QC_PIXEL_FORMAT_YUV_RAW_8BIT_UYVY,          CAM_FORMAT_YUV_RAW_8BIT_UYVY},
+    {QC_PIXEL_FORMAT_YUV_RAW_8BIT_VYUY,          CAM_FORMAT_YUV_RAW_8BIT_VYUY},
+    {QC_PIXEL_FORMAT_BAYER_QCOM_RAW_8GBRG,       CAM_FORMAT_BAYER_QCOM_RAW_8BPP_GBRG},
+    {QC_PIXEL_FORMAT_BAYER_QCOM_RAW_8GRBG,       CAM_FORMAT_BAYER_QCOM_RAW_8BPP_GRBG},
+    {QC_PIXEL_FORMAT_BAYER_QCOM_RAW_8RGGB,       CAM_FORMAT_BAYER_QCOM_RAW_8BPP_RGGB},
+    {QC_PIXEL_FORMAT_BAYER_QCOM_RAW_8BGGR,       CAM_FORMAT_BAYER_QCOM_RAW_8BPP_BGGR},
+    {QC_PIXEL_FORMAT_BAYER_QCOM_RAW_10GBRG,      CAM_FORMAT_BAYER_QCOM_RAW_10BPP_GBRG},
+    {QC_PIXEL_FORMAT_BAYER_QCOM_RAW_10GRBG,      CAM_FORMAT_BAYER_QCOM_RAW_10BPP_GRBG},
+    {QC_PIXEL_FORMAT_BAYER_QCOM_RAW_10RGGB,      CAM_FORMAT_BAYER_QCOM_RAW_10BPP_RGGB},
+    {QC_PIXEL_FORMAT_BAYER_QCOM_RAW_10BGGR,      CAM_FORMAT_BAYER_QCOM_RAW_10BPP_BGGR},
+    {QC_PIXEL_FORMAT_BAYER_QCOM_RAW_12GBRG,      CAM_FORMAT_BAYER_QCOM_RAW_12BPP_GBRG},
+    {QC_PIXEL_FORMAT_BAYER_QCOM_RAW_12GRBG,      CAM_FORMAT_BAYER_QCOM_RAW_12BPP_GRBG},
+    {QC_PIXEL_FORMAT_BAYER_QCOM_RAW_12RGGB,      CAM_FORMAT_BAYER_QCOM_RAW_12BPP_RGGB},
+    {QC_PIXEL_FORMAT_BAYER_QCOM_RAW_12BGGR,      CAM_FORMAT_BAYER_QCOM_RAW_12BPP_BGGR},
+    {QC_PIXEL_FORMAT_BAYER_MIPI_RAW_8GBRG,       CAM_FORMAT_BAYER_MIPI_RAW_8BPP_GBRG},
+    {QC_PIXEL_FORMAT_BAYER_MIPI_RAW_8GRBG,       CAM_FORMAT_BAYER_MIPI_RAW_8BPP_GRBG},
+    {QC_PIXEL_FORMAT_BAYER_MIPI_RAW_8RGGB,       CAM_FORMAT_BAYER_MIPI_RAW_8BPP_RGGB},
+    {QC_PIXEL_FORMAT_BAYER_MIPI_RAW_8BGGR,       CAM_FORMAT_BAYER_MIPI_RAW_8BPP_BGGR},
+    {QC_PIXEL_FORMAT_BAYER_MIPI_RAW_10GBRG,      CAM_FORMAT_BAYER_MIPI_RAW_10BPP_GBRG},
+    {QC_PIXEL_FORMAT_BAYER_MIPI_RAW_10GRBG,      CAM_FORMAT_BAYER_MIPI_RAW_10BPP_GRBG},
+    {QC_PIXEL_FORMAT_BAYER_MIPI_RAW_10RGGB,      CAM_FORMAT_BAYER_MIPI_RAW_10BPP_RGGB},
+    {QC_PIXEL_FORMAT_BAYER_MIPI_RAW_10BGGR,      CAM_FORMAT_BAYER_MIPI_RAW_10BPP_BGGR},
+    {QC_PIXEL_FORMAT_BAYER_MIPI_RAW_12GBRG,      CAM_FORMAT_BAYER_MIPI_RAW_12BPP_GBRG},
+    {QC_PIXEL_FORMAT_BAYER_MIPI_RAW_12GRBG,      CAM_FORMAT_BAYER_MIPI_RAW_12BPP_GRBG},
+    {QC_PIXEL_FORMAT_BAYER_MIPI_RAW_12RGGB,      CAM_FORMAT_BAYER_MIPI_RAW_12BPP_RGGB},
+    {QC_PIXEL_FORMAT_BAYER_MIPI_RAW_12BGGR,      CAM_FORMAT_BAYER_MIPI_RAW_12BPP_BGGR},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_8GBRG,     CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_8BPP_GBRG},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_8GRBG,     CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_8BPP_GRBG},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_8RGGB,     CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_8BPP_RGGB},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_8BGGR,     CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_8BPP_BGGR},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_10GBRG,    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_10BPP_GBRG},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_10GRBG,    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_10BPP_GRBG},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_10RGGB,    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_10BPP_RGGB},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_10BGGR,    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_10BPP_BGGR},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_12GBRG,    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_12BPP_GBRG},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_12GRBG,    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_12BPP_GRBG},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_12RGGB,    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_12BPP_RGGB},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_12BGGR,    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_12BPP_BGGR},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_8GBRG,     CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_8BPP_GBRG},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_8GRBG,     CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_8BPP_GRBG},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_8RGGB,     CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_8BPP_RGGB},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_8BGGR,     CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_8BPP_BGGR},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_10GBRG,    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_10BPP_GBRG},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_10GRBG,    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_10BPP_GRBG},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_10RGGB,    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_10BPP_RGGB},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_10BGGR,    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_10BPP_BGGR},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_12GBRG,    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_12BPP_GBRG},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_12GRBG,    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_12BPP_GRBG},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_12RGGB,    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_12BPP_RGGB},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_12BGGR,    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_12BPP_BGGR},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN8_8GBRG,   CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN8_8BPP_GBRG},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN8_8GRBG,   CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN8_8BPP_GRBG},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN8_8RGGB,   CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN8_8BPP_RGGB},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN8_8BGGR,   CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN8_8BPP_BGGR},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_8GBRG,  CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_8BPP_GBRG},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_8GRBG,  CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_8BPP_GRBG},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_8RGGB,  CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_8BPP_RGGB},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_8BGGR,  CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_8BPP_BGGR},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_10GBRG, CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_10BPP_GBRG},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_10GRBG, CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_10BPP_GRBG},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_10RGGB, CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_10BPP_RGGB},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_10BGGR, CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_10BPP_BGGR},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_12GBRG, CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_12BPP_GBRG},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_12GRBG, CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_12BPP_GRBG},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_12RGGB, CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_12BPP_RGGB},
+    {QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_12BGGR, CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_12BPP_BGGR}
+};
+
+const QCameraParameters::QCameraMap<cam_focus_mode_type>
+        QCameraParameters::FOCUS_MODES_MAP[] = {
+    { FOCUS_MODE_AUTO,               CAM_FOCUS_MODE_AUTO },
+    { FOCUS_MODE_INFINITY,           CAM_FOCUS_MODE_INFINITY },
+    { FOCUS_MODE_MACRO,              CAM_FOCUS_MODE_MACRO },
+    { FOCUS_MODE_FIXED,              CAM_FOCUS_MODE_FIXED },
+    { FOCUS_MODE_EDOF,               CAM_FOCUS_MODE_EDOF },
+    { FOCUS_MODE_CONTINUOUS_PICTURE, CAM_FOCUS_MODE_CONTINOUS_PICTURE },
+    { FOCUS_MODE_CONTINUOUS_VIDEO,   CAM_FOCUS_MODE_CONTINOUS_VIDEO },
+    { FOCUS_MODE_MANUAL_POSITION,    CAM_FOCUS_MODE_MANUAL},
+};
+
+const QCameraParameters::QCameraMap<cam_effect_mode_type>
+        QCameraParameters::EFFECT_MODES_MAP[] = {
+    { EFFECT_NONE,       CAM_EFFECT_MODE_OFF },
+    { EFFECT_MONO,       CAM_EFFECT_MODE_MONO },
+    { EFFECT_NEGATIVE,   CAM_EFFECT_MODE_NEGATIVE },
+    { EFFECT_SOLARIZE,   CAM_EFFECT_MODE_SOLARIZE },
+    { EFFECT_SEPIA,      CAM_EFFECT_MODE_SEPIA },
+    { EFFECT_POSTERIZE,  CAM_EFFECT_MODE_POSTERIZE },
+    { EFFECT_WHITEBOARD, CAM_EFFECT_MODE_WHITEBOARD },
+    { EFFECT_BLACKBOARD, CAM_EFFECT_MODE_BLACKBOARD },
+    { EFFECT_AQUA,       CAM_EFFECT_MODE_AQUA },
+    { EFFECT_EMBOSS,     CAM_EFFECT_MODE_EMBOSS },
+    { EFFECT_SKETCH,     CAM_EFFECT_MODE_SKETCH },
+    { EFFECT_NEON,       CAM_EFFECT_MODE_NEON }
+};
+
+const QCameraParameters::QCameraMap<cam_scene_mode_type>
+        QCameraParameters::SCENE_MODES_MAP[] = {
+    { SCENE_MODE_AUTO,           CAM_SCENE_MODE_OFF },
+    { SCENE_MODE_ACTION,         CAM_SCENE_MODE_ACTION },
+    { SCENE_MODE_PORTRAIT,       CAM_SCENE_MODE_PORTRAIT },
+    { SCENE_MODE_LANDSCAPE,      CAM_SCENE_MODE_LANDSCAPE },
+    { SCENE_MODE_NIGHT,          CAM_SCENE_MODE_NIGHT },
+    { SCENE_MODE_NIGHT_PORTRAIT, CAM_SCENE_MODE_NIGHT_PORTRAIT },
+    { SCENE_MODE_THEATRE,        CAM_SCENE_MODE_THEATRE },
+    { SCENE_MODE_BEACH,          CAM_SCENE_MODE_BEACH },
+    { SCENE_MODE_SNOW,           CAM_SCENE_MODE_SNOW },
+    { SCENE_MODE_SUNSET,         CAM_SCENE_MODE_SUNSET },
+    { SCENE_MODE_STEADYPHOTO,    CAM_SCENE_MODE_ANTISHAKE },
+    { SCENE_MODE_FIREWORKS ,     CAM_SCENE_MODE_FIREWORKS },
+    { SCENE_MODE_SPORTS ,        CAM_SCENE_MODE_SPORTS },
+    { SCENE_MODE_PARTY,          CAM_SCENE_MODE_PARTY },
+    { SCENE_MODE_CANDLELIGHT,    CAM_SCENE_MODE_CANDLELIGHT },
+    { SCENE_MODE_ASD,            CAM_SCENE_MODE_AUTO },
+    { SCENE_MODE_BACKLIGHT,      CAM_SCENE_MODE_BACKLIGHT },
+    { SCENE_MODE_FLOWERS,        CAM_SCENE_MODE_FLOWERS },
+    { SCENE_MODE_AR,             CAM_SCENE_MODE_AR },
+    { SCENE_MODE_HDR,            CAM_SCENE_MODE_HDR },
+};
+
+const QCameraParameters::QCameraMap<cam_flash_mode_t>
+        QCameraParameters::FLASH_MODES_MAP[] = {
+    { FLASH_MODE_OFF,   CAM_FLASH_MODE_OFF },
+    { FLASH_MODE_AUTO,  CAM_FLASH_MODE_AUTO },
+    { FLASH_MODE_ON,    CAM_FLASH_MODE_ON },
+    { FLASH_MODE_TORCH, CAM_FLASH_MODE_TORCH }
+};
+
+const QCameraParameters::QCameraMap<cam_focus_algorithm_type>
+         QCameraParameters::FOCUS_ALGO_MAP[] = {
+    { FOCUS_ALGO_AUTO,            CAM_FOCUS_ALGO_AUTO },
+    { FOCUS_ALGO_SPOT_METERING,   CAM_FOCUS_ALGO_SPOT },
+    { FOCUS_ALGO_CENTER_WEIGHTED, CAM_FOCUS_ALGO_CENTER_WEIGHTED },
+    { FOCUS_ALGO_FRAME_AVERAGE,   CAM_FOCUS_ALGO_AVERAGE }
+};
+
+const QCameraParameters::QCameraMap<cam_wb_mode_type>
+        QCameraParameters::WHITE_BALANCE_MODES_MAP[] = {
+    { WHITE_BALANCE_AUTO,            CAM_WB_MODE_AUTO },
+    { WHITE_BALANCE_INCANDESCENT,    CAM_WB_MODE_INCANDESCENT },
+    { WHITE_BALANCE_FLUORESCENT,     CAM_WB_MODE_FLUORESCENT },
+    { WHITE_BALANCE_WARM_FLUORESCENT,CAM_WB_MODE_WARM_FLUORESCENT},
+    { WHITE_BALANCE_DAYLIGHT,        CAM_WB_MODE_DAYLIGHT },
+    { WHITE_BALANCE_CLOUDY_DAYLIGHT, CAM_WB_MODE_CLOUDY_DAYLIGHT },
+    { WHITE_BALANCE_TWILIGHT,        CAM_WB_MODE_TWILIGHT },
+    { WHITE_BALANCE_SHADE,           CAM_WB_MODE_SHADE },
+    { WHITE_BALANCE_MANUAL,          CAM_WB_MODE_MANUAL},
+};
+
+const QCameraParameters::QCameraMap<cam_antibanding_mode_type>
+        QCameraParameters::ANTIBANDING_MODES_MAP[] = {
+    { ANTIBANDING_OFF,  CAM_ANTIBANDING_MODE_OFF },
+    { ANTIBANDING_50HZ, CAM_ANTIBANDING_MODE_50HZ },
+    { ANTIBANDING_60HZ, CAM_ANTIBANDING_MODE_60HZ },
+    { ANTIBANDING_AUTO, CAM_ANTIBANDING_MODE_AUTO }
+};
+
+const QCameraParameters::QCameraMap<cam_iso_mode_type>
+        QCameraParameters::ISO_MODES_MAP[] = {
+    { ISO_AUTO,  CAM_ISO_MODE_AUTO },
+    { ISO_HJR,   CAM_ISO_MODE_DEBLUR },
+    { ISO_100,   CAM_ISO_MODE_100 },
+    { ISO_200,   CAM_ISO_MODE_200 },
+    { ISO_400,   CAM_ISO_MODE_400 },
+    { ISO_800,   CAM_ISO_MODE_800 },
+    { ISO_1600,  CAM_ISO_MODE_1600 },
+    { ISO_3200,  CAM_ISO_MODE_3200 }
+};
+
+const QCameraParameters::QCameraMap<cam_hfr_mode_t>
+        QCameraParameters::HFR_MODES_MAP[] = {
+    { VIDEO_HFR_OFF, CAM_HFR_MODE_OFF },
+    { VIDEO_HFR_2X, CAM_HFR_MODE_60FPS },
+    { VIDEO_HFR_3X, CAM_HFR_MODE_90FPS },
+    { VIDEO_HFR_4X, CAM_HFR_MODE_120FPS },
+    { VIDEO_HFR_5X, CAM_HFR_MODE_150FPS },
+    { VIDEO_HFR_6X, CAM_HFR_MODE_180FPS },
+    { VIDEO_HFR_7X, CAM_HFR_MODE_210FPS },
+    { VIDEO_HFR_8X, CAM_HFR_MODE_240FPS },
+    { VIDEO_HFR_9X, CAM_HFR_MODE_480FPS }
+};
+
+const QCameraParameters::QCameraMap<cam_bracket_mode>
+        QCameraParameters::BRACKETING_MODES_MAP[] = {
+    { AE_BRACKET_OFF, CAM_EXP_BRACKETING_OFF },
+    { AE_BRACKET,     CAM_EXP_BRACKETING_ON }
+};
+
+const QCameraParameters::QCameraMap<int>
+        QCameraParameters::ON_OFF_MODES_MAP[] = {
+    { VALUE_OFF, 0 },
+    { VALUE_ON,  1 }
+};
+
+const QCameraParameters::QCameraMap<int>
+        QCameraParameters::TOUCH_AF_AEC_MODES_MAP[] = {
+    { QCameraParameters::TOUCH_AF_AEC_OFF, 0 },
+    { QCameraParameters::TOUCH_AF_AEC_ON, 1 }
+};
+
+const QCameraParameters::QCameraMap<int>
+        QCameraParameters::ENABLE_DISABLE_MODES_MAP[] = {
+    { VALUE_ENABLE,  1 },
+    { VALUE_DISABLE, 0 }
+};
+
+const QCameraParameters::QCameraMap<int>
+        QCameraParameters::DENOISE_ON_OFF_MODES_MAP[] = {
+    { DENOISE_OFF, 0 },
+    { DENOISE_ON,  1 }
+};
+
+const QCameraParameters::QCameraMap<int>
+        QCameraParameters::TRUE_FALSE_MODES_MAP[] = {
+    { VALUE_FALSE, 0},
+    { VALUE_TRUE,  1}
+};
+
+const QCameraParameters::QCameraMap<cam_flip_t>
+        QCameraParameters::FLIP_MODES_MAP[] = {
+    {FLIP_MODE_OFF, FLIP_NONE},
+    {FLIP_MODE_V, FLIP_V},
+    {FLIP_MODE_H, FLIP_H},
+    {FLIP_MODE_VH, FLIP_V_H}
+};
+
+const QCameraParameters::QCameraMap<int>
+        QCameraParameters::AF_BRACKETING_MODES_MAP[] = {
+    { AF_BRACKET_OFF, 0 },
+    { AF_BRACKET_ON,  1 }
+};
+
+const QCameraParameters::QCameraMap<int>
+        QCameraParameters::RE_FOCUS_MODES_MAP[] = {
+    { RE_FOCUS_OFF, 0 },
+    { RE_FOCUS_ON,  1 }
+};
+
+const QCameraParameters::QCameraMap<int>
+        QCameraParameters::CHROMA_FLASH_MODES_MAP[] = {
+    { CHROMA_FLASH_OFF, 0 },
+    { CHROMA_FLASH_ON,  1 }
+};
+
+const QCameraParameters::QCameraMap<int>
+        QCameraParameters::OPTI_ZOOM_MODES_MAP[] = {
+    { OPTI_ZOOM_OFF, 0 },
+    { OPTI_ZOOM_ON,  1 }
+};
+
+const QCameraParameters::QCameraMap<int>
+        QCameraParameters::TRUE_PORTRAIT_MODES_MAP[] = {
+    { TRUE_PORTRAIT_OFF, 0 },
+    { TRUE_PORTRAIT_ON,  1 }
+};
+
+const QCameraParameters::QCameraMap<int>
+        QCameraParameters::STILL_MORE_MODES_MAP[] = {
+    { STILL_MORE_OFF, 0 },
+    { STILL_MORE_ON,  1 }
+};
+
+const QCameraParameters::QCameraMap<cam_cds_mode_type_t>
+        QCameraParameters::CDS_MODES_MAP[] = {
+    { CDS_MODE_OFF, CAM_CDS_MODE_OFF },
+    { CDS_MODE_ON, CAM_CDS_MODE_ON },
+    { CDS_MODE_AUTO, CAM_CDS_MODE_AUTO}
+};
+
+const QCameraParameters::QCameraMap<int>
+        QCameraParameters::HDR_MODES_MAP[] = {
+    { HDR_MODE_SENSOR, 0 },
+    { HDR_MODE_MULTI_FRAME, 1 }
+};
+
+const QCameraParameters::QCameraMap<int>
+        QCameraParameters::VIDEO_ROTATION_MODES_MAP[] = {
+    { VIDEO_ROTATION_0, 0 },
+    { VIDEO_ROTATION_90, 90 },
+    { VIDEO_ROTATION_180, 180 },
+    { VIDEO_ROTATION_270, 270 }
+};
+
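+// Helper macros used below. DEFAULT_CAMERA_AREA follows the Android
+// "(left, top, right, bottom, weight)" area format, with all zeros meaning
+// no area is set. PARAM_MAP_SIZE yields the element count of the statically
+// sized QCameraMap tables above and must only be applied to arrays, never to
+// decayed pointers.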
+#define DEFAULT_CAMERA_AREA "(0, 0, 0, 0, 0)"
+#define DATA_PTR(MEM_OBJ,INDEX) MEM_OBJ->getPtr( INDEX )
+#define TOTAL_RAM_SIZE_512MB 536870912
+#define PARAM_MAP_SIZE(MAP) (sizeof(MAP)/sizeof(MAP[0]))
+
+/*===========================================================================
+ * FUNCTION   : QCameraParameters
+ *
+ * DESCRIPTION: default constructor of QCameraParameters
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraParameters::QCameraParameters()
+    : CameraParameters(),
+      m_reprocScaleParam(this),
+      m_pCapability(NULL),
+      m_pCamOpsTbl(NULL),
+      m_pParamHeap(NULL),
+      m_pParamBuf(NULL),
+      mIsType(IS_TYPE_NONE),
+      m_bZslMode(false),
+      m_bZslMode_new(false),
+      m_bForceZslMode(false),
+      m_bRecordingHint(false),
+      m_bRecordingHint_new(false),
+      m_bHistogramEnabled(false),
+      m_nFaceProcMask(0),
+      m_bFaceDetectionOn(0),
+      m_bDebugFps(false),
+      mFocusMode(CAM_FOCUS_MODE_MAX),
+      mPreviewFormat(CAM_FORMAT_YUV_420_NV21),
+      mPictureFormat(CAM_FORMAT_JPEG),
+      m_bNeedRestart(false),
+      m_bNoDisplayMode(false),
+      m_bWNROn(false),
+      m_bTNRPreviewOn(false),
+      m_bTNRVideoOn(false),
+      m_bInited(false),
+      m_nBurstNum(1),
+      m_nRetroBurstNum(0),
+      m_nBurstLEDOnPeriod(100),
+      m_bUpdateEffects(false),
+      m_bSceneTransitionAuto(false),
+      m_bPreviewFlipChanged(false),
+      m_bVideoFlipChanged(false),
+      m_bSnapshotFlipChanged(false),
+      m_bFixedFrameRateSet(false),
+      m_bHDREnabled(false),
+      m_bAVTimerEnabled(false),
+      m_bDISEnabled(false),
+      m_MobiMask(0),
+      m_AdjustFPS(NULL),
+      m_bHDR1xFrameEnabled(true),
+      m_HDRSceneEnabled(false),
+      m_bHDRThumbnailProcessNeeded(false),
+      m_bHDR1xExtraBufferNeeded(true),
+      m_bHDROutputCropEnabled(false),
+      m_tempMap(),
+      m_bAFBracketingOn(false),
+      m_bReFocusOn(false),
+      m_bChromaFlashOn(false),
+      m_bOptiZoomOn(false),
+      m_bSceneSelection(false),
+      m_SelectedScene(CAM_SCENE_MODE_MAX),
+      m_bSeeMoreOn(false),
+      m_bStillMoreOn(false),
+      m_bHfrMode(false),
+      m_bSensorHDREnabled(false),
+      m_bRdiMode(false),
+      m_bDisplayFrame(true),
+      m_bSecureMode(false),
+      m_bAeBracketingEnabled(false),
+      mFlashValue(CAM_FLASH_MODE_OFF),
+      mFlashDaemonValue(CAM_FLASH_MODE_OFF),
+      mHfrMode(CAM_HFR_MODE_OFF),
+      m_bHDRModeSensor(true),
+      mOfflineRAW(false),
+      m_bTruePortraitOn(false),
+      mCds_mode(CAM_CDS_MODE_OFF)
+{
+    char value[PROPERTY_VALUE_MAX];
+    // TODO: may move to parameter instead of sysprop
+    property_get("persist.debug.sf.showfps", value, "0");
+    m_bDebugFps = atoi(value) > 0 ? true : false;
+
+    // For thermal mode, it should be set as system property
+    // because system property applies to all applications, while
+    // parameters only apply to specific app.
+    property_get("persist.camera.thermal.mode", value, "fps");
+    if (!strcmp(value, "frameskip")) {
+        m_ThermalMode = QCAMERA_THERMAL_ADJUST_FRAMESKIP;
+    } else {
+        if (strcmp(value, "fps"))
+            ALOGE("%s: Invalid camera thermal mode %s", __func__, value);
+        m_ThermalMode = QCAMERA_THERMAL_ADJUST_FPS;
+    }
+
+    memset(&m_LiveSnapshotSize, 0, sizeof(m_LiveSnapshotSize));
+    memset(&m_default_fps_range, 0, sizeof(m_default_fps_range));
+    memset(&m_hfrFpsRange, 0, sizeof(m_hfrFpsRange));
+    memset(&m_stillmore_config, 0, sizeof(cam_still_more_t));
+    memset(&m_captureFrameConfig, 0, sizeof(cam_capture_frame_config_t));
+    mTotalPPCount = 0;
+    mZoomLevel = 0;
+    mParmZoomLevel = 0;
+    mCurPPCount = 0;
+    mBufBatchCnt = 0;
+    mRotation = 0;
+    mJpegRotation = 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : QCameraParameters
+ *
+ * DESCRIPTION: constructor of QCameraParameters
+ *
+ * PARAMETERS :
+ *   @params  : parameters in string form
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraParameters::QCameraParameters(const String8 &params)
+    : CameraParameters(params),
+    m_reprocScaleParam(this),
+    m_pCapability(NULL),
+    m_pCamOpsTbl(NULL),
+    m_pParamHeap(NULL),
+    m_pParamBuf(NULL),
+    m_bZslMode(false),
+    m_bZslMode_new(false),
+    m_bForceZslMode(false),
+    m_bRecordingHint(false),
+    m_bRecordingHint_new(false),
+    m_bHistogramEnabled(false),
+    m_nFaceProcMask(0),
+    m_bDebugFps(false),
+    mFocusMode(CAM_FOCUS_MODE_MAX),
+    mPreviewFormat(CAM_FORMAT_YUV_420_NV21),
+    mPictureFormat(CAM_FORMAT_JPEG),
+    m_bNeedRestart(false),
+    m_bNoDisplayMode(false),
+    m_bWNROn(false),
+    m_bTNRPreviewOn(false),
+    m_bTNRVideoOn(false),
+    m_bInited(false),
+    m_nBurstNum(1),
+    m_nRetroBurstNum(0),
+    m_nBurstLEDOnPeriod(100),
+    m_bPreviewFlipChanged(false),
+    m_bVideoFlipChanged(false),
+    m_bSnapshotFlipChanged(false),
+    m_bFixedFrameRateSet(false),
+    m_bHDREnabled(false),
+    m_bAVTimerEnabled(false),
+    m_AdjustFPS(NULL),
+    m_bHDR1xFrameEnabled(true),
+    m_HDRSceneEnabled(false),
+    m_bHDRThumbnailProcessNeeded(false),
+    m_bHDR1xExtraBufferNeeded(true),
+    m_bHDROutputCropEnabled(false),
+    m_tempMap(),
+    m_bAFBracketingOn(false),
+    m_bReFocusOn(false),
+    m_bChromaFlashOn(false),
+    m_bOptiZoomOn(false),
+    m_bSceneSelection(false),
+    m_SelectedScene(CAM_SCENE_MODE_MAX),
+    m_bSeeMoreOn(false),
+    m_bStillMoreOn(false),
+    m_bHfrMode(false),
+    m_bSensorHDREnabled(false),
+    m_bRdiMode(false),
+    m_bSecureMode(false),
+    m_bAeBracketingEnabled(false),
+    mFlashValue(CAM_FLASH_MODE_OFF),
+    mFlashDaemonValue(CAM_FLASH_MODE_OFF),
+    mHfrMode(CAM_HFR_MODE_OFF),
+    m_bHDRModeSensor(true),
+    mOfflineRAW(false),
+    m_bTruePortraitOn(false),
+    mCds_mode(CAM_CDS_MODE_OFF),
+    mParmEffect(CAM_EFFECT_MODE_OFF)
+{
+    memset(&m_LiveSnapshotSize, 0, sizeof(m_LiveSnapshotSize));
+    memset(&m_default_fps_range, 0, sizeof(m_default_fps_range));
+    memset(&m_hfrFpsRange, 0, sizeof(m_hfrFpsRange));
+    memset(&m_stillmore_config, 0, sizeof(cam_still_more_t));
+    mTotalPPCount = 0;
+    mZoomLevel = 0;
+    mParmZoomLevel = 0;
+    mCurPPCount = 0;
+    mRotation = 0;
+    mJpegRotation = 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCameraParameters
+ *
+ * DESCRIPTION: destructor of QCameraParameters
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraParameters::~QCameraParameters()
+{
+    deinit();
+}
+
+/*===========================================================================
+ * FUNCTION   : createSizesString
+ *
+ * DESCRIPTION: create a string containing an array of dimensions
+ *
+ * PARAMETERS :
+ *   @sizes   : array of dimensions
+ *   @len     : size of dimension array
+ *
+ * RETURN     : string obj
+ *==========================================================================*/
+String8 QCameraParameters::createSizesString(const cam_dimension_t *sizes, size_t len)
+{
+    String8 str;
+    char buffer[32];
+
+    if (len > 0) {
+        snprintf(buffer, sizeof(buffer), "%dx%d", sizes[0].width, sizes[0].height);
+        str.append(buffer);
+    }
+    for (size_t i = 1; i < len; i++) {
+        snprintf(buffer, sizeof(buffer), ",%dx%d",
+                sizes[i].width, sizes[i].height);
+        str.append(buffer);
+    }
+    return str;
+}
+
+/*===========================================================================
+ * FUNCTION   : createValuesString
+ *
+ * DESCRIPTION: create a string containing the map entries that match the
+ *              input values array
+ *
+ * PARAMETERS :
+ *   @values  : array of values
+ *   @len     : size of values array
+ *   @map     : map contains the mapping between values and enums
+ *   @map_len : size of the map
+ *
+ * RETURN     : string obj
+ *==========================================================================*/
+template <typename valuesType, class mapType> String8 createValuesString(
+        const valuesType *values, size_t len, const mapType *map, size_t map_len)
+{
+    String8 str;
+    int count = 0;
+
+    for (size_t i = 0; i < len; i++ ) {
+        for (size_t j = 0; j < map_len; j ++)
+            if (map[j].val == values[i]) {
+                if (NULL != map[j].desc) {
+                    if (count > 0) {
+                        str.append(",");
+                    }
+                    str.append(map[j].desc);
+                    count++;
+                    break; //loop j
+                }
+            }
+    }
+    return str;
+}
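+// Illustrative use only (not part of the original source), assuming the
+// usual capability fields on cam_capability_t:
+//   String8 focusModes = createValuesString(
+//           m_pCapability->supported_focus_modes,
+//           m_pCapability->supported_focus_modes_cnt,
+//           FOCUS_MODES_MAP, PARAM_MAP_SIZE(FOCUS_MODES_MAP));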
+
+/*===========================================================================
+ * FUNCTION   : createValuesStringFromMap
+ *
+ * DESCRIPTION: create a string containing values taken directly from the map
+ *
+ * PARAMETERS :
+ *   @map     : map contains the mapping between values and enums
+ *   @map_len : size of the map
+ *
+ * RETURN     : string obj
+ *==========================================================================*/
+template <class mapType> String8 createValuesStringFromMap(
+        const mapType *map, size_t map_len)
+{
+    String8 str;
+
+    for (size_t i = 0; i < map_len; i++) {
+        if (NULL != map[i].desc) {
+            if (i > 0) {
+                str.append(",");
+            }
+            str.append(map[i].desc);
+        }
+    }
+    return str;
+}
+
+/*===========================================================================
+ * FUNCTION   : createZoomRatioValuesString
+ *
+ * DESCRIPTION: create a string containing an array of zoom ratio values
+ *
+ * PARAMETERS :
+ *   @zoomRatios : array of zoom ratios
+ *   @length     : size of the array
+ *
+ * RETURN     : string obj
+ *==========================================================================*/
+String8 QCameraParameters::createZoomRatioValuesString(uint32_t *zoomRatios,
+        size_t length)
+{
+    String8 str;
+    char buffer[32] = {0};
+
+    if(length > 0){
+        snprintf(buffer, sizeof(buffer), "%d", zoomRatios[0]);
+        str.append(buffer);
+    }
+
+    for (size_t i = 1; i < length; i++) {
+        memset(buffer, 0, sizeof(buffer));
+        snprintf(buffer, sizeof(buffer), ",%d", zoomRatios[i]);
+        str.append(buffer);
+    }
+    return str;
+}
+
+/*===========================================================================
+ * FUNCTION   : createHfrValuesString
+ *
+ * DESCRIPTION: create a string containing the HFR map entries that match the
+ *              input HFR info array
+ *
+ * PARAMETERS :
+ *   @values  : array of hfr info
+ *   @len     : size of the array
+ *   @map     : map of hfr string value and enum
+ *   @map_len : size of map
+ *
+ * RETURN     : string obj
+ *==========================================================================*/
+String8 QCameraParameters::createHfrValuesString(const cam_hfr_info_t *values,
+        size_t len, const QCameraMap<cam_hfr_mode_t> *map, size_t map_len)
+{
+    String8 str;
+    int count = 0;
+
+    char value[PROPERTY_VALUE_MAX];
+    int8_t batch_count = 0;
+
+    property_get("persist.camera.batchcount", value, "0");
+    batch_count = atoi(value);
+
+    for (size_t i = 0; i < len; i++ ) {
+        for (size_t j = 0; j < map_len; j ++) {
+            if ((batch_count < CAMERA_MIN_BATCH_COUNT)
+                    && (map[j].val > CAM_HFR_MODE_120FPS)) {
+                /*TODO: Work around. Need to revert when we have
+                complete 240fps support*/
+                break;
+            }
+            if (map[j].val == (int)values[i].mode) {
+                if (NULL != map[j].desc) {
+                    if (count > 0) {
+                        str.append(",");
+                    }
+                    str.append(map[j].desc);
+                    count++;
+                    break; //loop j
+                }
+            }
+        }
+    }
+    if (count > 0) {
+        str.append(",");
+    }
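+    // "off" is always appended so applications can disable HFR regardless of
+    // which rates the sensor advertises.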
+    str.append(VIDEO_HFR_OFF);
+    return str;
+}
+
+/*===========================================================================
+ * FUNCTION   : createHfrSizesString
+ *
+ * DESCRIPTION: create a string containing an array of HFR sizes
+ *
+ * PARAMETERS :
+ *   @values  : array of hfr info
+ *   @len     : size of the array
+ *
+ * RETURN     : string obj
+ *==========================================================================*/
+String8 QCameraParameters::createHfrSizesString(const cam_hfr_info_t *values, size_t len)
+{
+    String8 str;
+    char buffer[32];
+
+    if (len > 0) {
+        snprintf(buffer, sizeof(buffer), "%dx%d",
+                 values[0].dim.width, values[0].dim.height);
+        str.append(buffer);
+    }
+    for (size_t i = 1; i < len; i++) {
+        snprintf(buffer, sizeof(buffer), ",%dx%d",
+                 values[i].dim.width, values[i].dim.height);
+        str.append(buffer);
+    }
+    return str;
+}
+
+/*===========================================================================
+ * FUNCTION   : createFpsString
+ *
+ * DESCRIPTION: create a string containing an array of FPS rates
+ *
+ * PARAMETERS :
+ *   @fps     : default fps range
+ *
+ * RETURN     : string obj
+ *==========================================================================*/
+String8 QCameraParameters::createFpsString(cam_fps_range_t &fps)
+{
+    char buffer[32];
+    String8 fpsValues;
+
+    int min_fps = int(fps.min_fps);
+    int max_fps = int(fps.max_fps);
+
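+    // Round the floating-point range inward so only whole fps values that
+    // lie inside [min_fps, max_fps] are advertised.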
+    if (min_fps < fps.min_fps){
+        min_fps++;
+    }
+    if (max_fps > fps.max_fps) {
+        max_fps--;
+    }
+    if (min_fps <= max_fps) {
+        snprintf(buffer, sizeof(buffer), "%d", min_fps);
+        fpsValues.append(buffer);
+    }
+
+    for (int i = min_fps+1; i <= max_fps; i++) {
+        snprintf(buffer, sizeof(buffer), ",%d", i);
+        fpsValues.append(buffer);
+    }
+
+    return fpsValues;
+}
+
+/*===========================================================================
+ * FUNCTION   : createFpsRangeString
+ *
+ * DESCRIPTION: create a string containing an array of FPS ranges
+ *
+ * PARAMETERS :
+ *   @fps     : array of fps ranges
+ *   @len     : size of the array
+ *   @default_fps_index : reference to index of default fps range
+ *
+ * RETURN     : string obj
+ *==========================================================================*/
+String8 QCameraParameters::createFpsRangeString(const cam_fps_range_t* fps,
+        size_t len, int &default_fps_index)
+{
+    String8 str;
+    char buffer[32];
+    int max_range = 0;
+    int min_fps, max_fps;
+
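+    // Ranges are reported in fps * 1000, matching the Android preview fps
+    // range convention; the widest range seen becomes the default.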
+    if (len > 0) {
+        min_fps = int(fps[0].min_fps * 1000);
+        max_fps = int(fps[0].max_fps * 1000);
+        max_range = max_fps - min_fps;
+        default_fps_index = 0;
+        snprintf(buffer, sizeof(buffer), "(%d,%d)", min_fps, max_fps);
+        str.append(buffer);
+    }
+    for (size_t i = 1; i < len; i++) {
+        min_fps = int(fps[i].min_fps * 1000);
+        max_fps = int(fps[i].max_fps * 1000);
+        if (max_range < (max_fps - min_fps)) {
+            max_range = max_fps - min_fps;
+            default_fps_index = (int)i;
+        }
+        snprintf(buffer, sizeof(buffer), ",(%d,%d)", min_fps, max_fps);
+        str.append(buffer);
+    }
+    return str;
+}
+
+/*===========================================================================
+ * FUNCTION   : lookupAttr
+ *
+ * DESCRIPTION: lookup a value by its name
+ *
+ * PARAMETERS :
+ *   @attr    : map contains <name, value>
+ *   @len     : size of the map
+ *   @name    : name to be looked up
+ *
+ * RETURN     : valid value if found
+ *              NAME_NOT_FOUND if not found
+ *==========================================================================*/
+template <class mapType> int lookupAttr(const mapType *arr,
+        size_t len, const char *name)
+{
+    if (name) {
+        for (size_t i = 0; i < len; i++) {
+            if (!strcmp(arr[i].desc, name))
+                return arr[i].val;
+        }
+    }
+    return NAME_NOT_FOUND;
+}
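+// Illustrative use only (not part of the original source): translating a
+// parameter string back to its enum value, e.g.
+//   int32_t mode = lookupAttr(HFR_MODES_MAP,
+//           PARAM_MAP_SIZE(HFR_MODES_MAP), hfrStr);
+//   if (mode != NAME_NOT_FOUND) { /* mode holds a cam_hfr_mode_t value */ }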
+
+/*===========================================================================
+ * FUNCTION   : lookupNameByValue
+ *
+ * DESCRIPTION: lookup a name by its value
+ *
+ * PARAMETERS :
+ *   @attr    : map contains <name, value>
+ *   @len     : size of the map
+ *   @value   : value to be looked up
+ *
+ * RETURN     : name str or NULL if not found
+ *==========================================================================*/
+template <class mapType> const char *lookupNameByValue(const mapType *arr,
+        size_t len, int value)
+{
+    for (size_t i = 0; i < len; i++) {
+        if (arr[i].val == value) {
+            return arr[i].desc;
+        }
+    }
+    return NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : setPreviewSize
+ *
+ * DESCRIPTION: set preview size from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setPreviewSize(const QCameraParameters& params)
+{
+    int width, height;
+    params.getPreviewSize(&width, &height);
+    ALOGI("Requested preview size %d x %d", width, height);
+
+    // Validate the preview size
+    for (size_t i = 0; i < m_pCapability->preview_sizes_tbl_cnt; ++i) {
+        if (width ==  m_pCapability->preview_sizes_tbl[i].width
+           && height ==  m_pCapability->preview_sizes_tbl[i].height) {
+            // check if need to restart preview in case of preview size change
+            int old_width, old_height;
+            CameraParameters::getPreviewSize(&old_width, &old_height);
+            if (width != old_width || height != old_height) {
+                m_bNeedRestart = true;
+            }
+
+            // set the new value
+            CameraParameters::setPreviewSize(width, height);
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid preview size requested: %dx%d", width, height);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setPictureSize
+ *
+ * DESCRIPTION: set picture size from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setPictureSize(const QCameraParameters& params)
+{
+    int width, height;
+    params.getPictureSize(&width, &height);
+    ALOGI("Requested picture size %d x %d", width, height);
+
+    // Validate the picture size
+    if(!m_reprocScaleParam.isScaleEnabled()){
+        for (size_t i = 0; i < m_pCapability->picture_sizes_tbl_cnt; ++i) {
+            if (width ==  m_pCapability->picture_sizes_tbl[i].width
+               && height ==  m_pCapability->picture_sizes_tbl[i].height) {
+                // check if need to restart preview in case of picture size change
+                int old_width, old_height;
+                CameraParameters::getPictureSize(&old_width, &old_height);
+                if ((m_bZslMode || m_bRecordingHint) &&
+                    (width != old_width || height != old_height)) {
+                    m_bNeedRestart = true;
+                }
+
+                // set the new value
+                CameraParameters::setPictureSize(width, height);
+                return NO_ERROR;
+            }
+        }
+    }else{
+        //should use scaled picture size table to validate
+        if(m_reprocScaleParam.setValidatePicSize(width, height) == NO_ERROR){
+            // check if need to restart preview in case of picture size change
+            int old_width, old_height;
+            CameraParameters::getPictureSize(&old_width, &old_height);
+            if ((m_bZslMode || m_bRecordingHint) &&
+                (width != old_width || height != old_height)) {
+                m_bNeedRestart = true;
+            }
+
+            // set the new value
+            char val[32];
+            snprintf(val, sizeof(val), "%dx%d", width, height);
+            updateParamEntry(KEY_PICTURE_SIZE, val);
+            CDBG("%s: %s", __func__, val);
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid picture size requested: %dx%d", width, height);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setVideoSize
+ *
+ * DESCRIPTION: set video size from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setVideoSize(const QCameraParameters& params)
+{
+    const char *str= NULL;
+    int width, height;
+    str = params.get(KEY_VIDEO_SIZE);
+    if(!str) {
+        //If application didn't set this parameter string, use the values from
+        //getPreviewSize() as video dimensions.
+        params.getPreviewSize(&width, &height);
+        ALOGE("No Record Size requested, use the preview dimensions");
+    } else {
+        params.getVideoSize(&width, &height);
+    }
+
+    // Validate the video size
+    for (size_t i = 0; i < m_pCapability->video_sizes_tbl_cnt; ++i) {
+        if (width ==  m_pCapability->video_sizes_tbl[i].width
+                && height ==  m_pCapability->video_sizes_tbl[i].height) {
+            // check if need to restart preview in case of video size change
+            int old_width, old_height;
+            CameraParameters::getVideoSize(&old_width, &old_height);
+            if (m_bRecordingHint &&
+               (width != old_width || height != old_height)) {
+                m_bNeedRestart = true;
+            }
+
+            // set the new value
+            CameraParameters::setVideoSize(width, height);
+            return NO_ERROR;
+        }
+    }
+
+    ALOGE("Invalid video size requested: %dx%d", width, height);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : getLiveSnapshotSize
+ *
+ * DESCRIPTION: get live snapshot size
+ *
+ * PARAMETERS :
+ *   @dim     : updated with the live snapshot size
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCameraParameters::getLiveSnapshotSize(cam_dimension_t &dim)
+{
+    if(is4k2kVideoResolution()) {
+        // We support maximum 8M liveshot @4K2K video resolution
+        cam_dimension_t resolution = {0, 0};
+        CameraParameters::getVideoSize(&resolution.width, &resolution.height);
+        if((m_LiveSnapshotSize.width > resolution.width) ||
+                (m_LiveSnapshotSize.height > resolution.height)) {
+            m_LiveSnapshotSize.width = resolution.width;
+            m_LiveSnapshotSize.height = resolution.height;
+        }
+    }
+    dim = m_LiveSnapshotSize;
+    CDBG_HIGH("%s: w x h: %d x %d", __func__, dim.width, dim.height);
+}
+
+/*===========================================================================
+ * FUNCTION   : setLiveSnapshotSize
+ *
+ * DESCRIPTION: set live snapshot size
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setLiveSnapshotSize(const QCameraParameters& params)
+{
+    char value[PROPERTY_VALUE_MAX];
+    property_get("persist.camera.opt.livepic", value, "1");
+    bool useOptimal = atoi(value) > 0 ? true : false;
+
+    // use picture size from user setting
+    params.getPictureSize(&m_LiveSnapshotSize.width, &m_LiveSnapshotSize.height);
+
+    size_t livesnapshot_sizes_tbl_cnt =
+            m_pCapability->livesnapshot_sizes_tbl_cnt;
+    cam_dimension_t *livesnapshot_sizes_tbl =
+            &m_pCapability->livesnapshot_sizes_tbl[0];
+
+    if(is4k2kVideoResolution()) {
+        // We support maximum 8M liveshot @4K2K video resolution
+        cam_dimension_t resolution = {0, 0};
+        CameraParameters::getVideoSize(&resolution.width, &resolution.height);
+        if((m_LiveSnapshotSize.width > resolution.width) ||
+                (m_LiveSnapshotSize.height > resolution.height)) {
+            m_LiveSnapshotSize.width = resolution.width;
+            m_LiveSnapshotSize.height = resolution.height;
+        }
+    }
+
+    // check if HFR is enabled
+    const char *hfrStr = params.get(KEY_QC_VIDEO_HIGH_FRAME_RATE);
+    cam_hfr_mode_t hfrMode = CAM_HFR_MODE_OFF;
+    const char *hsrStr = params.get(KEY_QC_VIDEO_HIGH_SPEED_RECORDING);
+
+    if ((hsrStr != NULL) && strcmp(hsrStr, "off")) {
+        int32_t hsr = lookupAttr(HFR_MODES_MAP, PARAM_MAP_SIZE(HFR_MODES_MAP), hsrStr);
+        if ((hsr != NAME_NOT_FOUND) && (hsr > CAM_HFR_MODE_OFF)) {
+            // if HSR is enabled, change live snapshot size
+            for (size_t i = 0; i < m_pCapability->hfr_tbl_cnt; i++) {
+                if (m_pCapability->hfr_tbl[i].mode == hsr) {
+                    livesnapshot_sizes_tbl_cnt =
+                            m_pCapability->hfr_tbl[i].livesnapshot_sizes_tbl_cnt;
+                    livesnapshot_sizes_tbl =
+                            &m_pCapability->hfr_tbl[i].livesnapshot_sizes_tbl[0];
+                    hfrMode = m_pCapability->hfr_tbl[i].mode;
+                    break;
+                }
+            }
+        }
+    } else if ((hfrStr != NULL) && strcmp(hfrStr, "off")) {
+        int32_t hfr = lookupAttr(HFR_MODES_MAP, PARAM_MAP_SIZE(HFR_MODES_MAP), hfrStr);
+        if ((hfr != NAME_NOT_FOUND) && (hfr > CAM_HFR_MODE_OFF)) {
+            // if HFR is enabled, change live snapshot size
+            for (size_t i = 0; i < m_pCapability->hfr_tbl_cnt; i++) {
+                if (m_pCapability->hfr_tbl[i].mode == hfr) {
+                    livesnapshot_sizes_tbl_cnt =
+                            m_pCapability->hfr_tbl[i].livesnapshot_sizes_tbl_cnt;
+                    livesnapshot_sizes_tbl =
+                            &m_pCapability->hfr_tbl[i].livesnapshot_sizes_tbl[0];
+                    hfrMode = m_pCapability->hfr_tbl[i].mode;
+                    break;
+                }
+            }
+        }
+    }
+
+    if (useOptimal || hfrMode != CAM_HFR_MODE_OFF) {
+        bool found = false;
+
+        // first check if picture size is within the list of supported sizes
+        for (size_t i = 0; i < livesnapshot_sizes_tbl_cnt; ++i) {
+            if (m_LiveSnapshotSize.width == livesnapshot_sizes_tbl[i].width &&
+                m_LiveSnapshotSize.height == livesnapshot_sizes_tbl[i].height) {
+                found = true;
+                break;
+            }
+        }
+
+        if (!found) {
+            // use optimal live snapshot size from supported list,
+            // that has same preview aspect ratio
+            int width = 0, height = 0;
+            params.getPreviewSize(&width, &height);
+
+            double previewAspectRatio = (double)width / height;
+            for (size_t i = 0; i < livesnapshot_sizes_tbl_cnt; ++i) {
+                double ratio = (double)livesnapshot_sizes_tbl[i].width /
+                                livesnapshot_sizes_tbl[i].height;
+                if (fabs(previewAspectRatio - ratio) <= ASPECT_TOLERANCE) {
+                    m_LiveSnapshotSize = livesnapshot_sizes_tbl[i];
+                    found = true;
+                    break;
+                }
+            }
+
+            if (!found && hfrMode != CAM_HFR_MODE_OFF) {
+                // Cannot find matching aspect ratio from supported live snapshot list;
+                // choose the max dim of preview and video size
+                CDBG("%s: Cannot find matching aspect ratio, choose max of preview or video size", __func__);
+                params.getVideoSize(&m_LiveSnapshotSize.width, &m_LiveSnapshotSize.height);
+                if (m_LiveSnapshotSize.width < width && m_LiveSnapshotSize.height < height) {
+                    m_LiveSnapshotSize.width = width;
+                    m_LiveSnapshotSize.height = height;
+                }
+            }
+        }
+    }
+    CDBG_HIGH("%s: live snapshot size %d x %d", __func__,
+          m_LiveSnapshotSize.width, m_LiveSnapshotSize.height);
+
+    return NO_ERROR;
+}
+
+
+/*===========================================================================
+ * FUNCTION   : setRawSize
+ *
+ * DESCRIPTION: set raw size
+ *
+ * PARAMETERS :
+ *   @dim     : raw dimension
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setRawSize(cam_dimension_t &dim)
+{
+    m_rawSize = dim;
+    return NO_ERROR;
+}
+/*===========================================================================
+ * FUNCTION   : setPreviewFormat
+ *
+ * DESCRIPTION: set preview format from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setPreviewFormat(const QCameraParameters& params)
+{
+    const char *str = params.getPreviewFormat();
+    int32_t previewFormat = lookupAttr(PREVIEW_FORMATS_MAP,
+            PARAM_MAP_SIZE(PREVIEW_FORMATS_MAP), str);
+    if (previewFormat != NAME_NOT_FOUND) {
+        mPreviewFormat = (cam_format_t)previewFormat;
+
+        CameraParameters::setPreviewFormat(str);
+        CDBG_HIGH("%s: format %d\n", __func__, mPreviewFormat);
+        return NO_ERROR;
+    }
+    ALOGE("Invalid preview format value: %s", (str == NULL) ? "NULL" : str);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setPictureFormat
+ *
+ * DESCRIPTION: set picture format from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setPictureFormat(const QCameraParameters& params)
+{
+    const char *str = params.getPictureFormat();
+    int32_t pictureFormat = lookupAttr(PICTURE_TYPES_MAP, PARAM_MAP_SIZE(PICTURE_TYPES_MAP), str);
+    if (pictureFormat != NAME_NOT_FOUND) {
+        mPictureFormat = pictureFormat;
+
+        CameraParameters::setPictureFormat(str);
+        CDBG_HIGH("%s: format %d\n", __func__, mPictureFormat);
+        return NO_ERROR;
+    }
+    ALOGE("%s: Invalid picture format value: %s", __func__, (str == NULL) ? "NULL" : str);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setJpegThumbnailSize
+ *
+ * DESCRIPTION: set jpeg thumbnail size from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setJpegThumbnailSize(const QCameraParameters& params)
+{
+    int width = params.getInt(KEY_JPEG_THUMBNAIL_WIDTH);
+    int height = params.getInt(KEY_JPEG_THUMBNAIL_HEIGHT);
+
+    CDBG("requested jpeg thumbnail size %d x %d", width, height);
+
+    size_t sizes_cnt = PARAM_MAP_SIZE(THUMBNAIL_SIZES_MAP);
+
+    cam_dimension_t dim;
+
+    // While taking a live snapshot, match the jpeg thumbnail aspect
+    // ratio to the liveshot size. For a normal snapshot, match the
+    // thumbnail aspect ratio to the picture size.
+    if (m_bRecordingHint) {
+        getLiveSnapshotSize(dim);
+    } else {
+        params.getPictureSize(&dim.width, &dim.height);
+    }
+
+    if (0 == dim.height) {
+        ALOGE("%s: picture size is invalid (%d x %d)", __func__, dim.width, dim.height);
+        return BAD_VALUE;
+    }
+    double picAspectRatio = (double)dim.width / (double)dim.height;
+
+    int optimalWidth = 0, optimalHeight = 0;
+    if (width != 0 || height != 0) {
+        // If the input jpeg thumbnail size is (0,0), no thumbnail is needed;
+        // honor this setting.
+        // Otherwise, search for the optimal jpeg thumbnail size that has the
+        // same aspect ratio as the picture size.
+        // If no jpeg thumbnail size with an appropriate aspect ratio is found,
+        // just honor the setting supplied by the application.
+
+        // Try to find a size that matches the aspect ratio and has the largest width
+        for (size_t i = 0; i < sizes_cnt; i++) {
+            if (THUMBNAIL_SIZES_MAP[i].height == 0) {
+                // No thumbnail case, just skip
+                continue;
+            }
+            double ratio =
+                (double)THUMBNAIL_SIZES_MAP[i].width / THUMBNAIL_SIZES_MAP[i].height;
+            if (fabs(ratio - picAspectRatio) > ASPECT_TOLERANCE)  {
+                continue;
+            }
+            if (THUMBNAIL_SIZES_MAP[i].width > optimalWidth) {
+                optimalWidth = THUMBNAIL_SIZES_MAP[i].width;
+                optimalHeight = THUMBNAIL_SIZES_MAP[i].height;
+            }
+        }
+
+        if ((0 == optimalWidth) || (0 == optimalHeight)) {
+            // Optimal size not found
+            // Validate thumbnail size
+            for (size_t i = 0; i < sizes_cnt; i++) {
+                if (width == THUMBNAIL_SIZES_MAP[i].width &&
+                    height == THUMBNAIL_SIZES_MAP[i].height) {
+                    optimalWidth = width;
+                    optimalHeight = height;
+                    break;
+                }
+            }
+        }
+    }
+
+    set(KEY_JPEG_THUMBNAIL_WIDTH, optimalWidth);
+    set(KEY_JPEG_THUMBNAIL_HEIGHT, optimalHeight);
+    return NO_ERROR;
+}
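
The loop above encodes a simple rule: among the thumbnail table entries whose aspect ratio is within ASPECT_TOLERANCE of the picture's, take the one with the largest width, and fall back to the app's exact request only if nothing matches. A minimal standalone sketch of that rule, outside the patch; the Dim type and the 0.001 tolerance are assumptions for illustration:

    #include <cmath>
    #include <cstddef>

    struct Dim { int width; int height; };

    // Pick the widest table entry whose aspect ratio is within 'tolerance'
    // of the picture's aspect ratio; {0,0} means "no thumbnail".
    static Dim pickThumbnail(Dim picture, const Dim *tbl, size_t cnt,
                             double tolerance /* e.g. 0.001, assumed */) {
        Dim best = {0, 0};
        double picRatio = (double)picture.width / picture.height;
        for (size_t i = 0; i < cnt; i++) {
            if (tbl[i].height == 0) continue;                   // skip "no thumbnail" entry
            double r = (double)tbl[i].width / tbl[i].height;
            if (std::fabs(r - picRatio) > tolerance) continue;  // aspect ratio mismatch
            if (tbl[i].width > best.width) best = tbl[i];       // keep the widest match
        }
        return best;
    }

For a 4000x3000 (4:3) picture this would select the widest 4:3 entry in the table rather than whatever size the app passed in.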
+
+/*===========================================================================
+ * FUNCTION   : setBurstLEDOnPeriod
+ *
+ * DESCRIPTION: set burst LED on period
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setBurstLEDOnPeriod(const QCameraParameters& params)
+{
+    int nBurstLEDOnPeriod = params.getInt(KEY_QC_SNAPSHOT_BURST_LED_ON_PERIOD);
+    //Check if the LED ON period is within limits
+    if ((nBurstLEDOnPeriod <= 0) || (nBurstLEDOnPeriod > 800)) {
+        // if the burst LED on period is not set (or out of range) in
+        // parameters, read it from the sys prop
+        char prop[PROPERTY_VALUE_MAX];
+        memset(prop, 0, sizeof(prop));
+        property_get("persist.camera.led.on.period", prop, "0");
+        nBurstLEDOnPeriod = atoi(prop);
+        if (nBurstLEDOnPeriod <= 0) {
+            nBurstLEDOnPeriod = 300;
+        }
+    }
+
+    set(KEY_QC_SNAPSHOT_BURST_LED_ON_PERIOD, nBurstLEDOnPeriod);
+    m_nBurstLEDOnPeriod = nBurstLEDOnPeriod;
+    CDBG_HIGH("%s: Burst LED on period  %u", __func__, m_nBurstLEDOnPeriod);
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_BURST_LED_ON_PERIOD,
+            (uint32_t)nBurstLEDOnPeriod)) {
+        return BAD_VALUE;
+    }
+
+    return NO_ERROR;
+}
+
+
+
+/*===========================================================================
+ * FUNCTION   : setRetroActiveBurstNum
+ *
+ * DESCRIPTION: set retro active burst num
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setRetroActiveBurstNum(
+        const QCameraParameters& params)
+{
+    int32_t nBurstNum = params.getInt(KEY_QC_NUM_RETRO_BURST_PER_SHUTTER);
+    CDBG_HIGH("%s:[ZSL Retro] m_nRetroBurstNum = %d", __func__, m_nRetroBurstNum);
+    if (nBurstNum <= 0) {
+        // if burst number is not set in parameters,
+        // read from sys prop
+        char prop[PROPERTY_VALUE_MAX];
+        memset(prop, 0, sizeof(prop));
+        property_get("persist.camera.retro.number", prop, "0");
+        nBurstNum = atoi(prop);
+        if (nBurstNum < 0) {
+            nBurstNum = 0;
+        }
+    }
+
+    set(KEY_QC_NUM_RETRO_BURST_PER_SHUTTER, nBurstNum);
+
+    m_nRetroBurstNum = nBurstNum;
+    CDBG_HIGH("%s: [ZSL Retro] m_nRetroBurstNum = %d", __func__, m_nRetroBurstNum);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setJpegQuality
+ *
+ * DESCRIPTION: set jpeg encoding quality from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setJpegQuality(const QCameraParameters& params)
+{
+    int32_t rc = NO_ERROR;
+    int quality = params.getInt(KEY_JPEG_QUALITY);
+    if (quality >= 0 && quality <= 100) {
+        set(KEY_JPEG_QUALITY, quality);
+    } else {
+        ALOGE("%s: Invalid jpeg quality=%d", __func__, quality);
+        rc = BAD_VALUE;
+    }
+
+    quality = params.getInt(KEY_JPEG_THUMBNAIL_QUALITY);
+    if (quality >= 0 && quality <= 100) {
+        set(KEY_JPEG_THUMBNAIL_QUALITY, quality);
+    } else {
+        ALOGE("%s: Invalid jpeg thumbnail quality=%d", __func__, quality);
+        rc = BAD_VALUE;
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setOrientation
+ *
+ * DESCRIPTION: set orientation from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setOrientation(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_ORIENTATION);
+
+    if (str != NULL) {
+        if (strcmp(str, portrait) == 0 || strcmp(str, landscape) == 0) {
+            // Camera service needs this to decide if the preview frames and raw
+            // pictures should be rotated.
+            set(KEY_QC_ORIENTATION, str);
+        } else {
+            ALOGE("%s: Invalid orientation value: %s", __func__, str);
+            return BAD_VALUE;
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setAutoExposure
+ *
+ * DESCRIPTION: set auto exposure value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setAutoExposure(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_AUTO_EXPOSURE);
+    const char *prev_str = get(KEY_QC_AUTO_EXPOSURE);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setAutoExposure(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setPreviewFpsRange
+ *
+ * DESCRIPTION: set preview FPS range from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setPreviewFpsRange(const QCameraParameters& params)
+{
+    int minFps,maxFps;
+    int prevMinFps, prevMaxFps, vidMinFps, vidMaxFps;
+    int rc = NO_ERROR;
+    bool found = false, updateNeeded = false;
+
+    CameraParameters::getPreviewFpsRange(&prevMinFps, &prevMaxFps);
+    params.getPreviewFpsRange(&minFps, &maxFps);
+
+    CDBG_HIGH("%s: FpsRange Values:(%d, %d)", __func__, prevMinFps, prevMaxFps);
+    CDBG_HIGH("%s: Requested FpsRange Values:(%d, %d)", __func__, minFps, maxFps);
+
+    //first check if we need to change fps because of HFR mode change
+    updateNeeded = UpdateHFRFrameRate(params);
+    if (updateNeeded) {
+        m_bNeedRestart = true;
+        rc = setHighFrameRate(mHfrMode);
+        if (rc != NO_ERROR) goto end;
+    }
+    CDBG_HIGH("%s: UpdateHFRFrameRate %d", __func__, updateNeeded);
+
+    vidMinFps = (int)m_hfrFpsRange.video_min_fps;
+    vidMaxFps = (int)m_hfrFpsRange.video_max_fps;
+
+    if(minFps == prevMinFps && maxFps == prevMaxFps) {
+        if ( m_bFixedFrameRateSet ) {
+            minFps = params.getPreviewFrameRate() * 1000;
+            maxFps = params.getPreviewFrameRate() * 1000;
+            m_bFixedFrameRateSet = false;
+        } else if (!updateNeeded) {
+            CDBG_HIGH("%s: No change in FpsRange", __func__);
+            rc = NO_ERROR;
+            goto end;
+        }
+    }
+    for(size_t i = 0; i < m_pCapability->fps_ranges_tbl_cnt; i++) {
+        // if the value is in the supported list
+        if (minFps >= m_pCapability->fps_ranges_tbl[i].min_fps * 1000 &&
+                maxFps <= m_pCapability->fps_ranges_tbl[i].max_fps * 1000) {
+            found = true;
+            CDBG_HIGH("%s: FPS i=%d : minFps = %d, maxFps = %d"
+                    " vidMinFps = %d, vidMaxFps = %d",
+                    __func__, i, minFps, maxFps,
+                    (int)m_hfrFpsRange.video_min_fps,
+                    (int)m_hfrFpsRange.video_max_fps);
+            if ((0.0f >= m_hfrFpsRange.video_min_fps) ||
+                    (0.0f >= m_hfrFpsRange.video_max_fps)) {
+                vidMinFps = minFps;
+                vidMaxFps = maxFps;
+            }
+            else {
+                vidMinFps = (int)m_hfrFpsRange.video_min_fps;
+                vidMaxFps = (int)m_hfrFpsRange.video_max_fps;
+            }
+
+            setPreviewFpsRange(minFps, maxFps, vidMinFps, vidMaxFps);
+            break;
+        }
+    }
+    if(found == false){
+        ALOGE("%s: error: FPS range value not supported", __func__);
+        rc = BAD_VALUE;
+    }
+end:
+    return rc;
+}
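
One detail worth calling out in the range check above: the fps values coming from the app-facing CameraParameters API are scaled by 1000 (e.g. 30 fps arrives as 30000), while fps_ranges_tbl stores plain fps, hence the "* 1000" in the comparison. A tiny sketch of the same check, with the table entry values assumed:

    #include <cstdio>

    int main() {
        int minFps = 15000, maxFps = 30000;       // app request: 15-30 fps, scaled by 1000
        float tbl_min = 15.0f, tbl_max = 30.0f;   // one assumed capability table entry, in fps
        bool supported = (minFps >= tbl_min * 1000) && (maxFps <= tbl_max * 1000);
        printf("supported = %d\n", supported);    // prints "supported = 1"
        return 0;
    }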
+
+/*===========================================================================
+ * FUNCTION   : UpdateHFRFrameRate
+ *
+ * DESCRIPTION: set preview FPS range based on HFR setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : bool true/false
+ *                  true - if HAL needs to overwrite the FPS range set by the app, false otherwise.
+ *==========================================================================*/
+
+bool QCameraParameters::UpdateHFRFrameRate(const QCameraParameters& params)
+{
+    bool updateNeeded = false;
+    int min_fps, max_fps;
+    int32_t hfrMode = CAM_HFR_MODE_OFF;
+    int32_t newHfrMode = CAM_HFR_MODE_OFF;
+
+    int parm_minfps,parm_maxfps;
+    int prevMinFps, prevMaxFps;
+    CameraParameters::getPreviewFpsRange(&prevMinFps, &prevMaxFps);
+    params.getPreviewFpsRange(&parm_minfps, &parm_maxfps);
+    CDBG_HIGH("%s: CameraParameters - : minFps = %d, maxFps = %d ",
+                __func__, prevMinFps, prevMaxFps);
+    CDBG_HIGH("%s: Requested params - : minFps = %d, maxFps = %d ",
+                __func__, parm_minfps, parm_maxfps);
+
+    const char *hfrStr = params.get(KEY_QC_VIDEO_HIGH_FRAME_RATE);
+    const char *hsrStr = params.get(KEY_QC_VIDEO_HIGH_SPEED_RECORDING);
+
+    const char *prev_hfrStr = CameraParameters::get(KEY_QC_VIDEO_HIGH_FRAME_RATE);
+    const char *prev_hsrStr = CameraParameters::get(KEY_QC_VIDEO_HIGH_SPEED_RECORDING);
+
+    if ((hfrStr != NULL) && (prev_hfrStr != NULL) && strcmp(hfrStr, prev_hfrStr)) {
+        updateParamEntry(KEY_QC_VIDEO_HIGH_FRAME_RATE, hfrStr);
+    }
+
+    if ((hsrStr != NULL) && (prev_hsrStr != NULL) && strcmp(hsrStr, prev_hsrStr)) {
+        updateParamEntry(KEY_QC_VIDEO_HIGH_SPEED_RECORDING, hsrStr);
+
+    }
+
+    // check if HFR is enabled
+    if ((hfrStr != NULL) && strcmp(hfrStr, "off")) {
+        hfrMode = lookupAttr(HFR_MODES_MAP, PARAM_MAP_SIZE(HFR_MODES_MAP), hfrStr);
+        if (NAME_NOT_FOUND != hfrMode) newHfrMode = hfrMode;
+    }
+    // check if HSR is enabled
+    else if ((hsrStr != NULL) && strcmp(hsrStr, "off")) {
+        hfrMode = lookupAttr(HFR_MODES_MAP, PARAM_MAP_SIZE(HFR_MODES_MAP), hsrStr);
+        if (NAME_NOT_FOUND != hfrMode) newHfrMode = hfrMode;
+    }
+    CDBG_HIGH("%s: prevHfrMode - %d, currentHfrMode = %d ",
+                __func__, mHfrMode, newHfrMode);
+
+    if (mHfrMode != newHfrMode) {
+        updateNeeded = true;
+        mHfrMode = newHfrMode;
+        switch (mHfrMode) {
+            case CAM_HFR_MODE_60FPS:
+                min_fps = 60000;
+                max_fps = 60000;
+                break;
+            case CAM_HFR_MODE_90FPS:
+                min_fps = 90000;
+                max_fps = 90000;
+                break;
+            case CAM_HFR_MODE_120FPS:
+                min_fps = 120000;
+                max_fps = 120000;
+                break;
+            case CAM_HFR_MODE_150FPS:
+                min_fps = 150000;
+                max_fps = 150000;
+                break;
+            case CAM_HFR_MODE_180FPS:
+                min_fps = 180000;
+                max_fps = 180000;
+                break;
+            case CAM_HFR_MODE_210FPS:
+                min_fps = 210000;
+                max_fps = 210000;
+                break;
+            case CAM_HFR_MODE_240FPS:
+                min_fps = 240000;
+                max_fps = 240000;
+                break;
+            case CAM_HFR_MODE_480FPS:
+                min_fps = 480000;
+                max_fps = 480000;
+                break;
+            case CAM_HFR_MODE_OFF:
+            default:
+                // Set Video Fps to zero
+                min_fps = 0;
+                max_fps = 0;
+                break;
+        }
+        m_hfrFpsRange.video_min_fps = (float)min_fps;
+        m_hfrFpsRange.video_max_fps = (float)max_fps;
+
+        CDBG_HIGH("%s: HFR mode (%d) Set video FPS : minFps = %d, maxFps = %d ",
+                __func__, mHfrMode, min_fps, max_fps);
+    }
+
+    m_hfrFpsRange.min_fps = (float)parm_minfps;
+    m_hfrFpsRange.max_fps = (float)parm_maxfps;
+
+    // Remember if HFR mode is ON
+    if ((mHfrMode > CAM_HFR_MODE_OFF) && (mHfrMode < CAM_HFR_MODE_MAX)) {
+        CDBG_HIGH("HFR mode is ON");
+        m_bHfrMode = true;
+    } else {
+        m_hfrFpsRange.video_min_fps = 0;
+        m_hfrFpsRange.video_max_fps = 0;
+        m_bHfrMode = false;
+        CDBG_HIGH("HFR mode is OFF");
+    }
+
+    if (m_bHfrMode && (mHfrMode > CAM_HFR_MODE_120FPS)
+            && (parm_maxfps != 0)) {
+        /* Setting Buffer batch count to use batch mode for higher fps*/
+        setBufBatchCount((int8_t)(m_hfrFpsRange.video_max_fps / parm_maxfps));
+    }
+
+    return updateNeeded;
+}
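
For HFR modes above 120 fps, the function above switches the stream into buffer-batch mode, sizing the batch as the ratio of the HFR video rate to the app's preview rate (both in fps x 1000). A small worked example, with the 30 fps preview maximum assumed:

    #include <cstdint>
    #include <cstdio>

    int main() {
        float video_max_fps = 240000.0f;   // CAM_HFR_MODE_240FPS, fps x 1000
        int   parm_maxfps   = 30000;       // assumed app preview fps range max (30 fps)
        int8_t batch = (int8_t)(video_max_fps / parm_maxfps);
        printf("buffers per batch = %d\n", batch);   // prints 8
        return 0;
    }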
+
+/*===========================================================================
+ * FUNCTION   : setPreviewFrameRate
+ *
+ * DESCRIPTION: set preview frame rate from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setPreviewFrameRate(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_PREVIEW_FRAME_RATE);
+    const char *prev_str = get(KEY_PREVIEW_FRAME_RATE);
+
+    if ( str ) {
+        if ( prev_str &&
+             strcmp(str, prev_str)) {
+            CDBG("%s: Requested Fixed Frame Rate %s", __func__, str);
+            updateParamEntry(KEY_PREVIEW_FRAME_RATE, str);
+            m_bFixedFrameRateSet = true;
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setEffect
+ *
+ * DESCRIPTION: set effect value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setEffect(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_EFFECT);
+    const char *prev_str = get(KEY_EFFECT);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0 ||
+            m_bUpdateEffects == true ) {
+            m_bUpdateEffects = false;
+            return setEffect(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setFocusMode
+ *
+ * DESCRIPTION: set focus mode from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setFocusMode(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_FOCUS_MODE);
+    const char *prev_str = get(KEY_FOCUS_MODE);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setFocusMode(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setFocusPosition
+ *
+ * DESCRIPTION: set focus position from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t  QCameraParameters::setFocusPosition(const QCameraParameters& params)
+{
+    const char *focus_str = params.get(KEY_FOCUS_MODE);
+
+    if (NULL == focus_str) {
+        return NO_ERROR;
+    }
+
+    CDBG("%s, current focus mode: %s", __func__, focus_str);
+    if (strcmp(focus_str, FOCUS_MODE_MANUAL_POSITION)) {
+        CDBG_HIGH("%s, dont set focus pos to back-end!", __func__);
+        return NO_ERROR;
+    }
+
+    const char *pos = params.get(KEY_QC_MANUAL_FOCUS_POSITION);
+    const char *prev_pos = get(KEY_QC_MANUAL_FOCUS_POSITION);
+    const char *type = params.get(KEY_QC_MANUAL_FOCUS_POS_TYPE);
+    const char *prev_type = get(KEY_QC_MANUAL_FOCUS_POS_TYPE);
+
+    if ((pos != NULL) && (type != NULL)) {
+        if (prev_pos  == NULL || (strcmp(pos, prev_pos) != 0) ||
+                prev_type == NULL || (strcmp(type, prev_type) != 0)) {
+            return setFocusPosition(type, pos);
+        }
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setBrightness
+ *
+ * DESCRIPTION: set brightness control value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setBrightness(const QCameraParameters& params)
+{
+    int currentBrightness = getInt(KEY_QC_BRIGHTNESS);
+    int brightness = params.getInt(KEY_QC_BRIGHTNESS);
+
+    if(params.get(KEY_QC_BRIGHTNESS) == NULL) {
+       CDBG_HIGH("%s: Brigtness not set by App ",__func__);
+       return NO_ERROR;
+    }
+    if (currentBrightness !=  brightness) {
+        if (brightness >= m_pCapability->brightness_ctrl.min_value &&
+            brightness <= m_pCapability->brightness_ctrl.max_value) {
+            CDBG(" new brightness value : %d ", brightness);
+            return setBrightness(brightness);
+        } else {
+            ALOGE("%s: invalid value %d out of (%d, %d)",
+                  __func__, brightness,
+                  m_pCapability->brightness_ctrl.min_value,
+                  m_pCapability->brightness_ctrl.max_value);
+            return BAD_VALUE;
+        }
+    } else {
+        CDBG("%s: No brightness value changed.", __func__);
+        return NO_ERROR;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getBrightness
+ *
+ * DESCRIPTION: get current brightness control value
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int - current brightness value
+ *==========================================================================*/
+int QCameraParameters::getBrightness()
+{
+    return getInt(KEY_QC_BRIGHTNESS);
+}
+
+/*===========================================================================
+ * FUNCTION   : setSharpness
+ *
+ * DESCRIPTION: set sharpness control value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setSharpness(const QCameraParameters& params)
+{
+    int shaprness = params.getInt(KEY_QC_SHARPNESS);
+    int prev_sharp = getInt(KEY_QC_SHARPNESS);
+
+    if(params.get(KEY_QC_SHARPNESS) == NULL) {
+       CDBG_HIGH("%s: Sharpness not set by App ",__func__);
+       return NO_ERROR;
+    }
+    if (prev_sharp !=  shaprness) {
+        if((shaprness >= m_pCapability->sharpness_ctrl.min_value) &&
+           (shaprness <= m_pCapability->sharpness_ctrl.max_value)) {
+            CDBG(" new sharpness value : %d ", shaprness);
+            return setSharpness(shaprness);
+        } else {
+            ALOGE("%s: invalid value %d out of (%d, %d)",
+                  __func__, shaprness,
+                  m_pCapability->sharpness_ctrl.min_value,
+                  m_pCapability->sharpness_ctrl.max_value);
+            return BAD_VALUE;
+        }
+    } else {
+        CDBG("%s: No value change in shaprness", __func__);
+        return NO_ERROR;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : setSkinToneEnhancement
+ *
+ * DESCRIPTION: set skin tone enhancement factor from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setSkinToneEnhancement(const QCameraParameters& params)
+{
+    int sceFactor = params.getInt(KEY_QC_SCE_FACTOR);
+    int prev_sceFactor = getInt(KEY_QC_SCE_FACTOR);
+
+    if(params.get(KEY_QC_SCE_FACTOR) == NULL) {
+       CDBG_HIGH("%s: Skintone enhancement not set by App ",__func__);
+       return NO_ERROR;
+    }
+    if (prev_sceFactor != sceFactor) {
+        if((sceFactor >= m_pCapability->sce_ctrl.min_value) &&
+           (sceFactor <= m_pCapability->sce_ctrl.max_value)) {
+            CDBG(" new Skintone Enhancement value : %d ", sceFactor);
+            return setSkinToneEnhancement(sceFactor);
+        } else {
+            ALOGE("%s: invalid value %d out of (%d, %d)",
+                  __func__, sceFactor,
+                  m_pCapability->sce_ctrl.min_value,
+                  m_pCapability->sce_ctrl.max_value);
+            return BAD_VALUE;
+        }
+    } else {
+        CDBG("%s: No value change in skintone enhancement factor", __func__);
+        return NO_ERROR;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : setSaturation
+ *
+ * DESCRIPTION: set saturation control value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setSaturation(const QCameraParameters& params)
+{
+    int saturation = params.getInt(KEY_QC_SATURATION);
+    int prev_sat = getInt(KEY_QC_SATURATION);
+
+    if(params.get(KEY_QC_SATURATION) == NULL) {
+       CDBG_HIGH("%s: Saturation not set by App ",__func__);
+       return NO_ERROR;
+    }
+    if (prev_sat !=  saturation) {
+        if((saturation >= m_pCapability->saturation_ctrl.min_value) &&
+           (saturation <= m_pCapability->saturation_ctrl.max_value)) {
+            CDBG(" new saturation value : %d ", saturation);
+            return setSaturation(saturation);
+        } else {
+            ALOGE("%s: invalid value %d out of (%d, %d)",
+                  __func__, saturation,
+                  m_pCapability->saturation_ctrl.min_value,
+                  m_pCapability->saturation_ctrl.max_value);
+            return BAD_VALUE;
+        }
+    } else {
+        CDBG("%s: No value change in saturation factor", __func__);
+        return NO_ERROR;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : setContrast
+ *
+ * DESCRIPTION: set contrast control value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setContrast(const QCameraParameters& params)
+{
+    int contrast = params.getInt(KEY_QC_CONTRAST);
+    int prev_contrast = getInt(KEY_QC_CONTRAST);
+
+    if(params.get(KEY_QC_CONTRAST) == NULL) {
+       CDBG_HIGH("%s: Contrast not set by App ",__func__);
+       return NO_ERROR;
+    }
+    if (prev_contrast !=  contrast) {
+        if((contrast >= m_pCapability->contrast_ctrl.min_value) &&
+           (contrast <= m_pCapability->contrast_ctrl.max_value)) {
+            CDBG(" new contrast value : %d ", contrast);
+            int32_t rc = setContrast(contrast);
+            return rc;
+        } else {
+            ALOGE("%s: invalid value %d out of (%d, %d)",
+                  __func__, contrast,
+                  m_pCapability->contrast_ctrl.min_value,
+                  m_pCapability->contrast_ctrl.max_value);
+            return BAD_VALUE;
+        }
+    } else {
+        CDBG("%s: No value change in contrast", __func__);
+        return NO_ERROR;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : setExposureCompensation
+ *
+ * DESCRIPTION: set exposure compensation value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setExposureCompensation(const QCameraParameters & params)
+{
+    int expComp = params.getInt(KEY_EXPOSURE_COMPENSATION);
+    int prev_expComp = getInt(KEY_EXPOSURE_COMPENSATION);
+
+    if(params.get(KEY_EXPOSURE_COMPENSATION) == NULL) {
+       CDBG_HIGH("%s: Exposure compensation not set by App ",__func__);
+       return NO_ERROR;
+    }
+    if (prev_expComp != expComp) {
+        if((expComp >= m_pCapability->exposure_compensation_min) &&
+           (expComp <= m_pCapability->exposure_compensation_max)) {
+            CDBG(" new Exposure Compensation value : %d ", expComp);
+            return setExposureCompensation(expComp);
+        } else {
+            ALOGE("%s: invalid value %d out of (%d, %d)",
+                  __func__, expComp,
+                  m_pCapability->exposure_compensation_min,
+                  m_pCapability->exposure_compensation_max);
+            return BAD_VALUE;
+        }
+    } else {
+        CDBG("%s: No value change in Exposure Compensation", __func__);
+        return NO_ERROR;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : setWhiteBalance
+ *
+ * DESCRIPTION: set white balance value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setWhiteBalance(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_WHITE_BALANCE);
+    const char *prev_str = get(KEY_WHITE_BALANCE);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setWhiteBalance(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setManualWhiteBalance
+ *
+ * DESCRIPTION: set manual white balance from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t  QCameraParameters::setManualWhiteBalance(const QCameraParameters& params)
+{
+    int32_t rc = NO_ERROR;
+    const char *wb_str = params.get(KEY_WHITE_BALANCE);
+    CDBG("%s, current wb mode: %s", __func__, wb_str);
+
+    if (wb_str != NULL) {
+        if (strcmp(wb_str, WHITE_BALANCE_MANUAL)) {
+            CDBG("%s, dont set cct to back-end.", __func__);
+            return NO_ERROR;
+        }
+    }
+
+    const char *value = params.get(KEY_QC_MANUAL_WB_VALUE);
+    const char *prev_value = get(KEY_QC_MANUAL_WB_VALUE);
+    const char *type = params.get(KEY_QC_MANUAL_WB_TYPE);
+    const char *prev_type = get(KEY_QC_MANUAL_WB_TYPE);
+
+    if ((value != NULL) && (type != NULL)) {
+        if (prev_value  == NULL || (strcmp(value, prev_value) != 0) ||
+                prev_type == NULL || (strcmp(type, prev_type) != 0)) {
+            updateParamEntry(KEY_QC_MANUAL_WB_TYPE, type);
+            updateParamEntry(KEY_QC_MANUAL_WB_VALUE, value);
+            int32_t wb_type = atoi(type);
+            if (wb_type == CAM_MANUAL_WB_MODE_CCT) {
+                rc = setWBManualCCT(value);
+            } else if (wb_type == CAM_MANUAL_WB_MODE_GAIN) {
+                rc = setManualWBGains(value);
+            } else {
+                rc = BAD_VALUE;
+            }
+        }
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setAntibanding
+ *
+ * DESCRIPTION: set antibanding value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setAntibanding(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_ANTIBANDING);
+    const char *prev_str = get(KEY_ANTIBANDING);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setAntibanding(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setStatsDebugMask
+ *
+ * DESCRIPTION: get the value from the persist property that controls
+ *              functionality in the Stats module
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setStatsDebugMask()
+{
+    uint32_t mask = 0;
+    char value[PROPERTY_VALUE_MAX];
+
+    property_get("persist.camera.stats.debug.mask", value, "0");
+    mask = (uint32_t)atoi(value);
+
+    CDBG_HIGH("%s: ctrl mask :%d", __func__, mask);
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_STATS_DEBUG_MASK, mask)) {
+        return BAD_VALUE;
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setPAAF
+ *
+ * DESCRIPTION: get the value from the persist property that controls
+ *              preview assisted AF (PAAF) in the Stats module
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setPAAF()
+{
+    uint32_t paaf = 0;
+    char value[PROPERTY_VALUE_MAX];
+
+    property_get("persist.camera.stats.af.paaf", value, "1");
+    paaf = (uint32_t)atoi(value);
+
+    CDBG_HIGH("%s: PAAF is: %s", __func__, paaf ? "ON": "OFF");
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_STATS_AF_PAAF, paaf)) {
+        return BAD_VALUE;
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setSceneDetect
+ *
+ * DESCRIPTION: set scene detect value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setSceneDetect(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_SCENE_DETECT);
+    const char *prev_str = get(KEY_QC_SCENE_DETECT);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setSceneDetect(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setVideoHDR
+ *
+ * DESCRIPTION: set video HDR value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setVideoHDR(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_VIDEO_HDR);
+    const char *prev_str = get(KEY_QC_VIDEO_HDR);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setVideoHDR(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setVtEnable
+ *
+ * DESCRIPTION: set VT (video telephony) timestamp enable from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setVtEnable(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_VT_ENABLE);
+    const char *prev_str = get(KEY_QC_VT_ENABLE);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setVtEnable(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setFaceRecognition
+ *
+ * DESCRIPTION: set face recognition mode from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setFaceRecognition(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_FACE_RECOGNITION);
+    const char *prev_str = get(KEY_QC_FACE_RECOGNITION);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            uint32_t maxFaces = (uint32_t)params.getInt(KEY_QC_MAX_NUM_REQUESTED_FACES);
+            return setFaceRecognition(str, maxFaces);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setZoom
+ *
+ * DESCRIPTION: set zoom value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setZoom(const QCameraParameters& params)
+{
+    if ((m_pCapability->zoom_supported == 0 ||
+         m_pCapability->zoom_ratio_tbl_cnt == 0)) {
+        CDBG_HIGH("%s: no zoom support", __func__);
+        return NO_ERROR;
+    }
+
+    int zoomLevel = params.getInt(KEY_ZOOM);
+    mParmZoomLevel = zoomLevel;
+    if ((zoomLevel < 0) || (zoomLevel >= (int)m_pCapability->zoom_ratio_tbl_cnt)) {
+        ALOGE("%s: invalid value %d out of (%d, %d)",
+              __func__, zoomLevel,
+              0, m_pCapability->zoom_ratio_tbl_cnt-1);
+        return BAD_VALUE;
+    }
+
+    int prevZoomLevel = getInt(KEY_ZOOM);
+    if (prevZoomLevel == zoomLevel) {
+        CDBG("%s: No value change in zoom %d %d", __func__, prevZoomLevel, zoomLevel);
+        return NO_ERROR;
+    }
+
+    return setZoom(zoomLevel);
+}
+
+/*===========================================================================
+ * FUNCTION   : setISOValue
+ *
+ * DESCRIPTION: set ISO value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t  QCameraParameters::setISOValue(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_ISO_MODE);
+    const char *prev_str = get(KEY_QC_ISO_MODE);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setISOValue(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setContinuousISO
+ *
+ * DESCRIPTION: set continuous ISO value
+ *
+ * PARAMETERS :
+ *   @isoValue : ISO value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t  QCameraParameters::setContinuousISO(const char *isoValue)
+{
+    char iso[PROPERTY_VALUE_MAX];
+    int32_t continous_iso = 0;
+
+    // Check if continuous ISO is set through setproperty
+    property_get("persist.camera.continuous.iso", iso, "");
+    if (strlen(iso) > 0) {
+        continous_iso = atoi(iso);
+    } else {
+        continous_iso = atoi(isoValue);
+    }
+
+    if ((continous_iso >= 0) &&
+            (continous_iso <= m_pCapability->sensitivity_range.max_sensitivity)) {
+        CDBG_HIGH("%s: Setting continuous ISO value %d", __func__, continous_iso);
+        updateParamEntry(KEY_QC_CONTINUOUS_ISO, isoValue);
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_ISO, continous_iso)) {
+            return BAD_VALUE;
+        }
+        return NO_ERROR;
+    }
+    ALOGE("Invalid iso value: %d", continous_iso);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setExposureTime
+ *
+ * DESCRIPTION: set exposure time from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t  QCameraParameters::setExposureTime(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_EXPOSURE_TIME);
+    const char *prev_str = get(KEY_QC_EXPOSURE_TIME);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+                strcmp(str, prev_str) != 0) {
+            return setExposureTime(str);
+        }
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setVideoRotation
+ *
+ * DESCRIPTION: set video rotation value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setVideoRotation(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_VIDEO_ROTATION);
+    if(str != NULL) {
+        int value = lookupAttr(VIDEO_ROTATION_MODES_MAP,
+                PARAM_MAP_SIZE(VIDEO_ROTATION_MODES_MAP), str);
+        if (value != NAME_NOT_FOUND) {
+            updateParamEntry(KEY_QC_VIDEO_ROTATION, str);
+            ALOGV("setVideoRotation: %s:  %d: ", str, value);
+        } else {
+            ALOGE("Invalid rotation value: %d", value);
+            return BAD_VALUE;
+        }
+
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setRotation
+ *
+ * DESCRIPTION: set rotation value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setRotation(const QCameraParameters& params)
+{
+    int32_t rotation = params.getInt(KEY_ROTATION);
+    if (rotation != -1) {
+        if (rotation == 0 || rotation == 90 ||
+            rotation == 180 || rotation == 270) {
+            set(KEY_ROTATION, rotation);
+
+            ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_META_JPEG_ORIENTATION,
+                    rotation);
+            mRotation = rotation;
+        } else {
+            ALOGE("Invalid rotation value: %d", rotation);
+            return BAD_VALUE;
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setFlash
+ *
+ * DESCRIPTION: set flash mode from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setFlash(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_FLASH_MODE);
+    const char *prev_str = get(KEY_FLASH_MODE);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setFlash(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setAecLock
+ *
+ * DESCRIPTION: set AEC lock value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setAecLock(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_AUTO_EXPOSURE_LOCK);
+    const char *prev_str = get(KEY_AUTO_EXPOSURE_LOCK);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setAecLock(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setAwbLock
+ *
+ * DESCRIPTION: set AWB lock from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setAwbLock(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_AUTO_WHITEBALANCE_LOCK);
+    const char *prev_str = get(KEY_AUTO_WHITEBALANCE_LOCK);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setAwbLock(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setAutoHDR
+ *
+ * DESCRIPTION: Enable/disable auto HDR
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setAutoHDR(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_AUTO_HDR_ENABLE);
+    const char *prev_str = get(KEY_QC_AUTO_HDR_ENABLE);
+    char prop[PROPERTY_VALUE_MAX];
+
+    memset(prop, 0, sizeof(prop));
+    property_get("persist.camera.auto.hdr.enable", prop, VALUE_DISABLE);
+    if (str != NULL) {
+       if (prev_str == NULL ||
+           strcmp(str, prev_str) != 0) {
+           CDBG_HIGH("%s : Auto HDR set to: %s", __func__, str);
+           return updateParamEntry(KEY_QC_AUTO_HDR_ENABLE, str);
+       }
+    } else {
+       if (prev_str == NULL ||
+           strcmp(prev_str, prop) != 0 ) {
+           CDBG_HIGH("%s : Auto HDR set to: %s", __func__, prop);
+           updateParamEntry(KEY_QC_AUTO_HDR_ENABLE, prop);
+       }
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+* FUNCTION   : isAutoHDREnabled
+*
+* DESCRIPTION: Query auto HDR status
+*
+* PARAMETERS : None
+*
+* RETURN     : bool true/false
+*==========================================================================*/
+bool QCameraParameters::isAutoHDREnabled()
+{
+    const char *str = get(KEY_QC_AUTO_HDR_ENABLE);
+    if (str != NULL) {
+        int32_t value = lookupAttr(ENABLE_DISABLE_MODES_MAP,
+                PARAM_MAP_SIZE(ENABLE_DISABLE_MODES_MAP), str);
+        if (value == NAME_NOT_FOUND) {
+            ALOGE("%s: Invalid Auto HDR value %s", __func__, str);
+            return false;
+        }
+
+        CDBG_HIGH("%s : Auto HDR status is: %d", __func__, value);
+        return value ? true : false;
+    }
+
+    CDBG_HIGH("%s : Auto HDR status not set!", __func__);
+    return false;
+}
+
+/*===========================================================================
+ * FUNCTION   : setMCEValue
+ *
+ * DESCRIPTION: set memory color enhancement value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setMCEValue(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_MEMORY_COLOR_ENHANCEMENT);
+    const char *prev_str = get(KEY_QC_MEMORY_COLOR_ENHANCEMENT);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setMCEValue(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setDISValue
+ *
+ * DESCRIPTION: enable/disable DIS from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setDISValue(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_DIS);
+    const char *prev_str = get(KEY_QC_DIS);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setDISValue(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setLensShadeValue
+ *
+ * DESCRIPTION: set lens shade value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setLensShadeValue(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_LENSSHADE);
+    const char *prev_str = get(KEY_QC_LENSSHADE);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setLensShadeValue(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setFocusAreas
+ *
+ * DESCRIPTION: set focus areas from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setFocusAreas(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_FOCUS_AREAS);
+    if (str != NULL) {
+        int max_num_af_areas = getInt(KEY_MAX_NUM_FOCUS_AREAS);
+        if(max_num_af_areas == 0) {
+            ALOGE("%s: max num of AF area is 0, cannot set focus areas", __func__);
+            return BAD_VALUE;
+        }
+
+        const char *prev_str = get(KEY_FOCUS_AREAS);
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setFocusAreas(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setMeteringAreas
+ *
+ * DESCRIPTION: set metering areas from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setMeteringAreas(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_METERING_AREAS);
+    if (str != NULL) {
+        int max_num_mtr_areas = getInt(KEY_MAX_NUM_METERING_AREAS);
+        if(max_num_mtr_areas == 0) {
+            ALOGE("%s: max num of metering areas is 0, cannot set focus areas", __func__);
+            return BAD_VALUE;
+        }
+
+        const char *prev_str = get(KEY_METERING_AREAS);
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setMeteringAreas(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setSceneMode
+ *
+ * DESCRIPTION: set scene mode from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setSceneMode(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_SCENE_MODE);
+    const char *prev_str = get(KEY_SCENE_MODE);
+    CDBG_HIGH("%s: str - %s, prev_str - %s",__func__, str, prev_str);
+
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+
+            if(strcmp(str, SCENE_MODE_AUTO) == 0) {
+                m_bSceneTransitionAuto = true;
+            }
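+            // HDR scene selection: when the sensor advertises native HDR
+            // (m_bHDRModeSensor) the sensor HDR path is used, otherwise the
+            // multi-frame S/W HDR path; switching into or out of HDR sets
+            // m_bNeedRestart so the preview pipeline is reconfigured.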
+            if (strcmp(str, SCENE_MODE_HDR) == 0) {
+
+                // If HDR is set from the client and the feature is not enabled in the backend, ignore it.
+                if (m_bHDRModeSensor) {
+                    m_bSensorHDREnabled = true;
+                    CDBG_HIGH("%s: Sensor HDR mode Enabled",__func__);
+                } else {
+                    m_bHDREnabled = true;
+                    CDBG_HIGH("%s: S/W HDR Enabled",__func__);
+                }
+            } else {
+                m_bHDREnabled = false;
+                if (m_bSensorHDREnabled) {
+                    m_bSensorHDREnabled = false;
+                    m_bNeedRestart = true;
+                    setSensorSnapshotHDR("off");
+                }
+            }
+
+            if (m_bSensorHDREnabled) {
+                setSensorSnapshotHDR("on");
+                m_bNeedRestart = true;
+            } else if ((m_bHDREnabled) ||
+                ((prev_str != NULL) && (strcmp(prev_str, SCENE_MODE_HDR) == 0))) {
+                CDBG_HIGH("%s: scene mode changed between HDR and non-HDR, need restart", __func__);
+
+                m_bNeedRestart = true;
+            }
+
+            return setSceneMode(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setSelectableZoneAf
+ *
+ * DESCRIPTION: set selectable zone auto focus value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setSelectableZoneAf(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_SELECTABLE_ZONE_AF);
+    const char *prev_str = get(KEY_QC_SELECTABLE_ZONE_AF);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setSelectableZoneAf(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setAEBracket
+ *
+ * DESCRIPTION: set AE bracket from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setAEBracket(const QCameraParameters& params)
+{
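+    // An active HDR scene mode owns exposure bracketing, so the app-visible
+    // AE-bracket setting is forced off here to keep the two features from
+    // conflicting.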
+    if (isHDREnabled()) {
+        CDBG_HIGH("%s: scene mode is HDR, overwrite AE bracket setting to off", __func__);
+        return setAEBracket(AE_BRACKET_OFF);
+    }
+
+    const char *expStr = params.get(KEY_QC_CAPTURE_BURST_EXPOSURE);
+    if (NULL != expStr && strlen(expStr) > 0) {
+        set(KEY_QC_CAPTURE_BURST_EXPOSURE, expStr);
+    } else {
+        char prop[PROPERTY_VALUE_MAX];
+        memset(prop, 0, sizeof(prop));
+        property_get("persist.capture.burst.exposures", prop, "");
+        if (strlen(prop) > 0) {
+            set(KEY_QC_CAPTURE_BURST_EXPOSURE, prop);
+        } else {
+            remove(KEY_QC_CAPTURE_BURST_EXPOSURE);
+        }
+    }
+
+    const char *str = params.get(KEY_QC_AE_BRACKET_HDR);
+    const char *prev_str = get(KEY_QC_AE_BRACKET_HDR);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setAEBracket(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setAFBracket
+ *
+ * DESCRIPTION: set AF bracket from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setAFBracket(const QCameraParameters& params)
+{
+    if ((m_pCapability->qcom_supported_feature_mask &
+            (CAM_QCOM_FEATURE_REFOCUS | CAM_QCOM_FEATURE_UBIFOCUS)) == 0) {
+        CDBG_HIGH("%s: AF Bracketing is not supported",__func__);
+        return NO_ERROR;
+    }
+    const char *str = params.get(KEY_QC_AF_BRACKET);
+    const char *prev_str = get(KEY_QC_AF_BRACKET);
+    CDBG_HIGH("%s: str =%s & prev_str =%s",__func__, str, prev_str);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            m_bNeedRestart = true;
+            return setAFBracket(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setReFocus
+ *
+ * DESCRIPTION: set refocus from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setReFocus(const QCameraParameters& params)
+{
+    if ((m_pCapability->qcom_supported_feature_mask &
+            (CAM_QCOM_FEATURE_REFOCUS | CAM_QCOM_FEATURE_UBIFOCUS)) == 0) {
+        ALOGD("%s: AF Bracketing is not supported",__func__);
+        return NO_ERROR;
+    }
+    const char *str = params.get(KEY_QC_RE_FOCUS);
+    const char *prev_str = get(KEY_QC_RE_FOCUS);
+    CDBG_HIGH("%s: str =%s & prev_str =%s",__func__, str, prev_str);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            m_bNeedRestart = true;
+            return setReFocus(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setChromaFlash
+ *
+ * DESCRIPTION: set chroma flash from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setChromaFlash(const QCameraParameters& params)
+{
+    if ((m_pCapability->qcom_supported_feature_mask &
+        CAM_QCOM_FEATURE_CHROMA_FLASH) == 0) {
+        CDBG_HIGH("%s: Chroma Flash is not supported",__func__);
+        return NO_ERROR;
+    }
+    const char *str = params.get(KEY_QC_CHROMA_FLASH);
+    const char *prev_str = get(KEY_QC_CHROMA_FLASH);
+    CDBG_HIGH("%s: str =%s & prev_str =%s",__func__, str, prev_str);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            m_bNeedRestart = true;
+            return setChromaFlash(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setOptiZoom
+ *
+ * DESCRIPTION: set opti zoom from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setOptiZoom(const QCameraParameters& params)
+{
+    if ((m_pCapability->qcom_supported_feature_mask &
+        CAM_QCOM_FEATURE_OPTIZOOM) == 0){
+        CDBG_HIGH("%s: Opti Zoom is not supported",__func__);
+        return NO_ERROR;
+    }
+    const char *str = params.get(KEY_QC_OPTI_ZOOM);
+    const char *prev_str = get(KEY_QC_OPTI_ZOOM);
+    CDBG_HIGH("%s: str =%s & prev_str =%s",__func__, str, prev_str);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            m_bNeedRestart = true;
+            return setOptiZoom(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setTruePortrait
+ *
+ * DESCRIPTION: set true portrait from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setTruePortrait(const QCameraParameters& params)
+{
+    if ((m_pCapability->qcom_supported_feature_mask &
+            CAM_QCOM_FEATURE_TRUEPORTRAIT) == 0) {
+        CDBG("%s: True Portrait is not supported",__func__);
+        return NO_ERROR;
+    }
+    const char *str = params.get(KEY_QC_TRUE_PORTRAIT);
+    const char *prev_str = get(KEY_QC_TRUE_PORTRAIT);
+    CDBG_HIGH("%s: str =%s & prev_str =%s", __func__, str, prev_str);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setTruePortrait(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setHDRMode
+ *
+ * DESCRIPTION: set HDR mode from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setHDRMode(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_HDR_MODE);
+    const char *prev_str = get(KEY_QC_HDR_MODE);
+    uint32_t supported_hdr_modes = m_pCapability->qcom_supported_feature_mask &
+          (CAM_QCOM_FEATURE_SENSOR_HDR | CAM_QCOM_FEATURE_HDR);
+
+    CDBG_HIGH("%s: str =%s & prev_str =%s",__func__, str, prev_str);
+    if (str != NULL) {
+        if ((CAM_QCOM_FEATURE_SENSOR_HDR == supported_hdr_modes) &&
+                (strncmp(str, HDR_MODE_SENSOR, strlen(HDR_MODE_SENSOR)))) {
+            CDBG_HIGH("%s: Only sensor HDR is supported",__func__);
+            return NO_ERROR;
+        } else if ((CAM_QCOM_FEATURE_HDR == supported_hdr_modes) &&
+                (strncmp(str, HDR_MODE_MULTI_FRAME, strlen(HDR_MODE_MULTI_FRAME)))) {
+            CDBG_HIGH("%s: Only multi frame HDR is supported",__func__);
+            return NO_ERROR;
+        } else if (!supported_hdr_modes) {
+            CDBG_HIGH("%s: HDR is not supported",__func__);
+            return NO_ERROR;
+        }
+        if (prev_str == NULL ||
+                strcmp(str, prev_str) != 0) {
+            return setHDRMode(str);
+        }
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setHDRNeed1x
+ *
+ * DESCRIPTION: set HDR need 1x from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setHDRNeed1x(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_HDR_NEED_1X);
+    const char *prev_str = get(KEY_QC_HDR_NEED_1X);
+
+    CDBG_HIGH("%s: str =%s & prev_str =%s", __func__, str, prev_str);
+    if (str != NULL) {
+        if (m_bHDRModeSensor) {
+            CDBG_HIGH("%s: Only multi frame HDR supports 1x frame",__func__);
+            return NO_ERROR;
+        }
+        if ((prev_str == NULL) || (strcmp(str, prev_str) != 0)) {
+            return setHDRNeed1x(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setSeeMore
+ *
+ * DESCRIPTION: set see more (llvd) from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setSeeMore(const QCameraParameters& params)
+{
+    if ((m_pCapability->qcom_supported_feature_mask &
+            CAM_QCOM_FEATURE_LLVD) == 0) {
+        CDBG("%s: See more is not supported", __func__);
+        return NO_ERROR;
+    }
+    const char *str = params.get(KEY_QC_SEE_MORE);
+    const char *prev_str = get(KEY_QC_SEE_MORE);
+    CDBG_HIGH("%s: str =%s & prev_str =%s", __func__, str, prev_str);
+    if (str != NULL) {
+        if (prev_str == NULL || strcmp(str, prev_str) != 0) {
+            m_bNeedRestart = true;
+            return setSeeMore(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setStillMore
+ *
+ * DESCRIPTION: set stillmore from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setStillMore(const QCameraParameters& params)
+{
+    if ((m_pCapability->qcom_supported_feature_mask &
+            CAM_QCOM_FEATURE_STILLMORE) == 0) {
+        CDBG("%s: Stillmore is not supported",__func__);
+        return NO_ERROR;
+    }
+    const char *str = params.get(KEY_QC_STILL_MORE);
+    const char *prev_str = get(KEY_QC_STILL_MORE);
+    CDBG_HIGH("%s: str =%s & prev_str =%s", __func__, str, prev_str);
+    if (str != NULL) {
+        if (prev_str == NULL || strcmp(str, prev_str) != 0) {
+            m_bNeedRestart = true;
+            return setStillMore(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setRedeyeReduction
+ *
+ * DESCRIPTION: set red eye reduction setting from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setRedeyeReduction(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_REDEYE_REDUCTION);
+    const char *prev_str = get(KEY_QC_REDEYE_REDUCTION);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setRedeyeReduction(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setGpsLocation
+ *
+ * DESCRIPTION: set GPS location information from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setGpsLocation(const QCameraParameters& params)
+{
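+    // GPS data is a pure pass-through for EXIF: each key supplied by the app is
+    // copied into the local parameter store, and any key the app omitted is
+    // removed so stale location data is not carried over to the next capture.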
+    const char *method = params.get(KEY_GPS_PROCESSING_METHOD);
+    if (method) {
+        set(KEY_GPS_PROCESSING_METHOD, method);
+    } else {
+        remove(KEY_GPS_PROCESSING_METHOD);
+    }
+
+    const char *latitude = params.get(KEY_GPS_LATITUDE);
+    if (latitude) {
+        set(KEY_GPS_LATITUDE, latitude);
+    } else {
+        remove(KEY_GPS_LATITUDE);
+    }
+
+    const char *latitudeRef = params.get(KEY_QC_GPS_LATITUDE_REF);
+    if (latitudeRef) {
+        set(KEY_QC_GPS_LATITUDE_REF, latitudeRef);
+    } else {
+        remove(KEY_QC_GPS_LATITUDE_REF);
+    }
+
+    const char *longitude = params.get(KEY_GPS_LONGITUDE);
+    if (longitude) {
+        set(KEY_GPS_LONGITUDE, longitude);
+    } else {
+        remove(KEY_GPS_LONGITUDE);
+    }
+
+    const char *longitudeRef = params.get(KEY_QC_GPS_LONGITUDE_REF);
+    if (longitudeRef) {
+        set(KEY_QC_GPS_LONGITUDE_REF, longitudeRef);
+    } else {
+        remove(KEY_QC_GPS_LONGITUDE_REF);
+    }
+
+    const char *altitudeRef = params.get(KEY_QC_GPS_ALTITUDE_REF);
+    if (altitudeRef) {
+        set(KEY_QC_GPS_ALTITUDE_REF, altitudeRef);
+    } else {
+        remove(KEY_QC_GPS_ALTITUDE_REF);
+    }
+
+    const char *altitude = params.get(KEY_GPS_ALTITUDE);
+    if (altitude) {
+        set(KEY_GPS_ALTITUDE, altitude);
+    } else {
+        remove(KEY_GPS_ALTITUDE);
+    }
+
+    const char *status = params.get(KEY_QC_GPS_STATUS);
+    if (status) {
+        set(KEY_QC_GPS_STATUS, status);
+    } else {
+        remove(KEY_QC_GPS_STATUS);
+    }
+
+    const char *timestamp = params.get(KEY_GPS_TIMESTAMP);
+    if (timestamp) {
+        set(KEY_GPS_TIMESTAMP, timestamp);
+    } else {
+        remove(KEY_GPS_TIMESTAMP);
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setNumOfSnapshot
+ *
+ * DESCRIPTION: set number of snapshot per shutter from user setting
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setNumOfSnapshot()
+{
+    int nBurstNum = getBurstNum();
+    int nExpnum = 0;
+
+    const char *bracket_str = get(KEY_QC_AE_BRACKET_HDR);
+    if (bracket_str != NULL && strlen(bracket_str) > 0) {
+        int value = lookupAttr(BRACKETING_MODES_MAP, PARAM_MAP_SIZE(BRACKETING_MODES_MAP),
+                bracket_str);
+        switch (value) {
+        case CAM_EXP_BRACKETING_ON:
+            {
+                nExpnum = 0;
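+                // KEY_QC_CAPTURE_BURST_EXPOSURE holds a comma-separated list of
+                // exposure values (e.g. "-12,0,12" -- illustrative only); the
+                // loop below just counts the tokens so one snapshot is taken
+                // per bracketed exposure.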
+                const char *str_val = get(KEY_QC_CAPTURE_BURST_EXPOSURE);
+                if ((str_val != NULL) && (strlen(str_val) > 0)) {
+                    char prop[PROPERTY_VALUE_MAX];
+                    memset(prop, 0, sizeof(prop));
+                    strlcpy(prop, str_val, PROPERTY_VALUE_MAX);
+                    char *saveptr = NULL;
+                    char *token = strtok_r(prop, ",", &saveptr);
+                    while (token != NULL) {
+                        token = strtok_r(NULL, ",", &saveptr);
+                        nExpnum++;
+                    }
+                }
+                if (nExpnum == 0) {
+                    nExpnum = 1;
+                }
+            }
+            break;
+        default:
+            nExpnum = 1 + getNumOfExtraHDROutBufsIfNeeded();
+            break;
+        }
+    }
+
+    if (isUbiRefocus()) {
+        nBurstNum = m_pCapability->refocus_af_bracketing_need.output_count + 1;
+    }
+
+    CDBG_HIGH("%s: nBurstNum = %d, nExpnum = %d", __func__, nBurstNum, nExpnum);
+    set(KEY_QC_NUM_SNAPSHOT_PER_SHUTTER, nBurstNum * nExpnum);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setRecordingHint
+ *
+ * DESCRIPTION: set recording hint value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setRecordingHint(const QCameraParameters& params)
+{
+    const char * str = params.get(KEY_RECORDING_HINT);
+    const char *prev_str = get(KEY_RECORDING_HINT);
+    if (str != NULL) {
+        if (prev_str == NULL || strcmp(str, prev_str) != 0) {
+            int32_t value = lookupAttr(TRUE_FALSE_MODES_MAP, PARAM_MAP_SIZE(TRUE_FALSE_MODES_MAP),
+                    str);
+            if(value != NAME_NOT_FOUND){
+                updateParamEntry(KEY_RECORDING_HINT, str);
+                setRecordingHintValue(value);
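+                // Side effects of toggling the hint: face detection is disabled
+                // while recording and re-enabled otherwise (when the FD option is
+                // set), and the DIS setting is pushed again so it is re-applied
+                // for the new use case.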
+                if (getFaceDetectionOption() == true) {
+                    setFaceDetection(value > 0 ? false : true, false);
+                }
+                if (m_bDISEnabled) {
+                    CDBG_HIGH("%s: %d: Setting DIS value again", __func__, __LINE__);
+                    setDISValue(VALUE_ENABLE);
+                }
+                return NO_ERROR;
+            } else {
+                ALOGE("Invalid recording hint value: %s", str);
+                return BAD_VALUE;
+            }
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setNoDisplayMode
+ *
+ * DESCRIPTION: set no display mode from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setNoDisplayMode(const QCameraParameters& params)
+{
+    const char *str_val  = params.get(KEY_QC_NO_DISPLAY_MODE);
+    const char *prev_str = get(KEY_QC_NO_DISPLAY_MODE);
+    char prop[PROPERTY_VALUE_MAX];
+
+    if(str_val && strlen(str_val) > 0) {
+        if (prev_str == NULL || strcmp(str_val, prev_str) != 0) {
+            m_bNoDisplayMode = atoi(str_val);
+            set(KEY_QC_NO_DISPLAY_MODE, str_val);
+            m_bNeedRestart = true;
+        }
+    } else {
+        memset(prop, 0, sizeof(prop));
+        property_get("persist.camera.no-display", prop, "0");
+        m_bNoDisplayMode = atoi(prop);
+    }
+    CDBG_HIGH("Param m_bNoDisplayMode = %d", m_bNoDisplayMode);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setZslMode
+ *
+ * DESCRIPTION: set ZSL mode from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setZslMode(const QCameraParameters& params)
+{
+    const char *str_val  = params.get(KEY_QC_ZSL);
+    const char *prev_val  = get(KEY_QC_ZSL);
+    int32_t rc = NO_ERROR;
+
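+    // m_bForceZslMode (decided elsewhere in the parameter code) overrides whatever
+    // the app requested: ZSL is switched on unconditionally and a preview restart
+    // is flagged so the new stream configuration takes effect.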
+    if(m_bForceZslMode && !m_bZslMode) {
+        // Force ZSL mode to ON
+        set(KEY_QC_ZSL, VALUE_ON);
+        m_bZslMode_new = true;
+        m_bZslMode = true;
+        m_bNeedRestart = true;
+        int32_t value = m_bForceZslMode;
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_ZSL_MODE, value)) {
+            rc = BAD_VALUE;
+        }
+    } else if (str_val != NULL) {
+        if (prev_val == NULL || strcmp(str_val, prev_val) != 0) {
+            int32_t value = lookupAttr(ON_OFF_MODES_MAP, PARAM_MAP_SIZE(ON_OFF_MODES_MAP),
+                    str_val);
+            if (value != NAME_NOT_FOUND) {
+                set(KEY_QC_ZSL, str_val);
+                m_bZslMode_new = (value > 0)? true : false;
+
+                // ZSL mode changed, need restart preview
+                m_bNeedRestart = true;
+
+                if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_ZSL_MODE, value)) {
+                    rc = BAD_VALUE;
+                }
+            } else {
+                ALOGE("Invalid ZSL mode value: %s", str_val);
+                rc = BAD_VALUE;
+            }
+        }
+    }
+    ALOGI("%s: enabled: %d", __func__, m_bZslMode_new);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setWaveletDenoise
+ *
+ * DESCRIPTION: set wavelet denoise value from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setWaveletDenoise(const QCameraParameters& params)
+{
+    const char *str_pf = params.getPictureFormat();
+    int32_t pictureFormat = lookupAttr(PICTURE_TYPES_MAP, PARAM_MAP_SIZE(PICTURE_TYPES_MAP),
+            str_pf);
+    if (pictureFormat != NAME_NOT_FOUND) {
+        if (CAM_FORMAT_YUV_422_NV16 == pictureFormat) {
+            ALOGE("NV16 format isn't supported in denoise lib!");
+            return setWaveletDenoise(DENOISE_OFF);
+        }
+    }
+    const char *str = params.get(KEY_QC_DENOISE);
+    const char *prev_str = get(KEY_QC_DENOISE);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setWaveletDenoise(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setTemporalDenoise
+ *
+ * DESCRIPTION: set temporal denoise value from user setting and properties
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setTemporalDenoise(const QCameraParameters& params)
+{
+    if ((m_pCapability->qcom_supported_feature_mask & CAM_QCOM_FEATURE_CPP_TNR) == 0) {
+        CDBG_HIGH("%s: TNR is not supported",__func__);
+        return NO_ERROR;
+    }
+    const char *str = params.get(KEY_QC_TNR_MODE);
+    const char *prev_str = get(KEY_QC_TNR_MODE);
+    const char *video_str = params.get(KEY_QC_VIDEO_TNR_MODE);
+    const char *video_prev_str = get(KEY_QC_VIDEO_TNR_MODE);
+
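+    // Two independent controls: while the recording hint is set,
+    // KEY_QC_VIDEO_TNR_MODE drives TNR for both video and preview; otherwise
+    // KEY_QC_TNR_MODE drives preview/snapshot TNR. Either one falls back to the
+    // persist.camera.tnr.* properties when the app does not supply it.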
+    if (m_bRecordingHint_new == true) {
+        if (video_str) {
+            if ((video_prev_str == NULL) || (strcmp(video_str, video_prev_str) != 0)) {
+                if (!strcmp(video_str, VALUE_ON)) {
+                    m_bTNRVideoOn = true;
+                    m_bTNRPreviewOn = true;
+                } else {
+                    m_bTNRVideoOn = false;
+                    m_bTNRPreviewOn = false;
+                }
+                updateParamEntry(KEY_QC_VIDEO_TNR_MODE, video_str);
+            } else {
+                return NO_ERROR;
+            }
+        } else {
+            char video_value[PROPERTY_VALUE_MAX];
+            memset(video_value, 0, sizeof(video_value));
+            property_get("persist.camera.tnr.video", video_value, VALUE_OFF);
+            if (!strcmp(video_value, VALUE_ON)) {
+                m_bTNRVideoOn = true;
+            } else {
+                m_bTNRVideoOn = false;
+            }
+            updateParamEntry(KEY_QC_VIDEO_TNR_MODE, video_value);
+
+            char preview_value[PROPERTY_VALUE_MAX];
+            memset(preview_value, 0, sizeof(preview_value));
+            property_get("persist.camera.tnr.preview", preview_value, video_value);
+            if (!strcmp(preview_value, VALUE_ON)) {
+                m_bTNRPreviewOn = true;
+            } else {
+                m_bTNRPreviewOn = false;
+            }
+            updateParamEntry(KEY_QC_TNR_MODE, preview_value);
+        }
+
+        cam_denoise_param_t temp;
+        memset(&temp, 0, sizeof(temp));
+        if (m_bTNRVideoOn || m_bTNRPreviewOn) {
+            temp.denoise_enable = 1;
+            temp.process_plates = getDenoiseProcessPlate(CAM_INTF_PARM_TEMPORAL_DENOISE);
+
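+            // The code treats CDS as incompatible with TNR (a CPP feature), so
+            // whenever TNR is enabled CDS is explicitly forced to CDS_MODE_OFF
+            // and the cached mCds_mode is updated to match.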
+            int32_t cds_mode = lookupAttr(CDS_MODES_MAP, PARAM_MAP_SIZE(CDS_MODES_MAP),
+                    CDS_MODE_OFF);
+
+            if (cds_mode != NAME_NOT_FOUND) {
+                updateParamEntry(KEY_QC_VIDEO_CDS_MODE, CDS_MODE_OFF);
+                if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_CDS_MODE, cds_mode)) {
+                    ALOGE("%s:Failed CDS MODE to update table", __func__);
+                    return BAD_VALUE;
+                }
+                CDBG("%s: CDS in video mode is set to = %s when TNR is enabled",
+                        __func__, CDS_MODE_OFF);
+                mCds_mode = cds_mode;
+            } else {
+                ALOGE("%s: Invalid argument for video CDS MODE %d", __func__, cds_mode);
+            }
+        }
+        CDBG("%s: TNR enable in video mode = %d, plates = %d", __func__,
+                temp.denoise_enable, temp.process_plates);
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_TEMPORAL_DENOISE, temp)) {
+            return BAD_VALUE;
+        }
+    } else {
+        if (str) {
+            if ((prev_str == NULL) || (strcmp(str, prev_str) != 0)) {
+                if (!strcmp(str, VALUE_ON)) {
+                    m_bTNRPreviewOn = true;
+                } else {
+                    m_bTNRPreviewOn = false;
+                }
+                updateParamEntry(KEY_QC_TNR_MODE, str);
+            } else {
+                return NO_ERROR;
+            }
+        } else {
+            char value[PROPERTY_VALUE_MAX];
+            memset(value, 0, sizeof(value));
+            property_get("persist.camera.tnr.preview", value, VALUE_OFF);
+            if (!strcmp(value, VALUE_ON)) {
+                m_bTNRPreviewOn = true;
+            } else {
+                m_bTNRPreviewOn = false;
+            }
+            updateParamEntry(KEY_QC_TNR_MODE, value);
+        }
+        cam_denoise_param_t temp;
+        memset(&temp, 0, sizeof(temp));
+        if (m_bTNRPreviewOn) {
+            temp.denoise_enable = 1;
+            temp.process_plates = getDenoiseProcessPlate(CAM_INTF_PARM_TEMPORAL_DENOISE);
+
+            int32_t cds_mode = lookupAttr(CDS_MODES_MAP, PARAM_MAP_SIZE(CDS_MODES_MAP),
+                    CDS_MODE_OFF);
+
+            if (cds_mode != NAME_NOT_FOUND) {
+                updateParamEntry(KEY_QC_CDS_MODE, CDS_MODE_OFF);
+                if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_CDS_MODE, cds_mode)) {
+                    ALOGE("%s:Failed CDS MODE to update table", __func__);
+                    return BAD_VALUE;
+                }
+                CDBG("%s: CDS in snapshot mode is set to = %s when TNR is enabled",
+                        __func__, CDS_MODE_OFF);
+                mCds_mode = cds_mode;
+            } else {
+                ALOGE("%s: Invalid argument for snapshot CDS MODE %d", __func__, cds_mode);
+            }
+        }
+        CDBG("%s: TNR enable in snapshot mode = %d, plates = %d", __func__,
+                temp.denoise_enable, temp.process_plates);
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_TEMPORAL_DENOISE, temp)) {
+            return BAD_VALUE;
+        }
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setCameraMode
+ *
+ * DESCRIPTION: set camera mode from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setCameraMode(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_CAMERA_MODE);
+    if (str != NULL) {
+        set(KEY_QC_CAMERA_MODE, str);
+    } else {
+        remove(KEY_QC_CAMERA_MODE);
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setSceneSelectionMode
+ *
+ * DESCRIPTION: set scene selection mode from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setSceneSelectionMode(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_SCENE_SELECTION);
+    const char *prev_str = get(KEY_QC_SCENE_SELECTION);
+    if (NULL != str) {
+        if ((NULL == prev_str) || (strcmp(str, prev_str) != 0)) {
+            int32_t value = lookupAttr(ENABLE_DISABLE_MODES_MAP,
+                    PARAM_MAP_SIZE(ENABLE_DISABLE_MODES_MAP), str);
+            if (value != NAME_NOT_FOUND) {
+                ALOGD("%s: Setting selection value %s", __func__, str);
+                if (value && m_bZslMode_new) {
+                    updateParamEntry(KEY_QC_SCENE_SELECTION, str);
+                    m_bNeedRestart = true;
+                    m_bSceneSelection = true;
+                } else if (!value) {
+                    updateParamEntry(KEY_QC_SCENE_SELECTION, str);
+                    m_bNeedRestart = true;
+                    m_bSceneSelection = false;
+                } else {
+                    ALOGE("%s: Trying to enable scene selection in non ZSL mode!!!",
+                            __func__);
+                    return BAD_VALUE;
+                }
+            } else {
+                ALOGE("%s: Trying to configure invalid scene selection value: %s",
+                        __func__,
+                        str);
+                return BAD_VALUE;
+            }
+        }
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setSelectedScene
+ *
+ * DESCRIPTION: select specific scene
+ *
+ * PARAMETERS :
+ *   @scene   : scene mode
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setSelectedScene(cam_scene_mode_type scene)
+{
+    Mutex::Autolock l(m_SceneSelectLock);
+    m_SelectedScene = scene;
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : getSelectedScene
+ *
+ * DESCRIPTION: get selected scene
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : currently selected scene
+ *==========================================================================*/
+cam_scene_mode_type QCameraParameters::getSelectedScene()
+{
+    Mutex::Autolock l(m_SceneSelectLock);
+    return m_SelectedScene;
+}
+
+/*==========================================================
+ * FUNCTION   : setRdiMode
+ *
+ * DESCRIPTION: set Rdi mode from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *===========================================================*/
+int32_t QCameraParameters::setRdiMode(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_RDI_MODE);
+    const char *prev_str = get(KEY_QC_RDI_MODE);
+    char prop[PROPERTY_VALUE_MAX];
+    memset(prop, 0, sizeof(prop));
+
+    property_get("persist.camera.rdi.mode", prop, VALUE_DISABLE);
+    if ((str != NULL) && (prev_str == NULL || strcmp(str, prev_str) != 0)) {
+        ALOGD("%s:%d : RDI mode set to %s", __func__, __LINE__, str);
+        setRdiMode(str);
+    } else if (prev_str == NULL || strcmp(prev_str, prop) != 0 ) {
+        ALOGD("%s:%d : RDI mode set to prop: %s", __func__, __LINE__, prop);
+        setRdiMode(prop);
+    }
+    return NO_ERROR;
+}
+
+/*==========================================================
+ * FUNCTION   : setSecureMode
+ *
+ * DESCRIPTION: set secure mode from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *===========================================================*/
+
+int32_t QCameraParameters::setSecureMode(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_SECURE_MODE);
+    const char *prev_str = get(KEY_QC_SECURE_MODE);
+    char prop[PROPERTY_VALUE_MAX];
+    memset(prop, 0, sizeof(prop));
+
+    property_get("persist.camera.secure.mode", prop, VALUE_DISABLE);
+    if ((str != NULL) && (prev_str == NULL || strcmp(str, prev_str) != 0)) {
+        ALOGD("%s : Secure mode set to KEY: %s", __func__, str);
+        setSecureMode(str);
+    } else if (prev_str == NULL || strcmp(prev_str, prop) != 0 ) {
+        ALOGD("%s : Secure mode set to prop: %s", __func__, prop);
+        setSecureMode(prop);
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setZslAttributes
+ *
+ * DESCRIPTION: set ZSL related attributes from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setZslAttributes(const QCameraParameters& params)
+{
+    // TODO: may switch to pure param instead of sysprop
+    char prop[PROPERTY_VALUE_MAX];
+
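+    // Each ZSL attribute below falls back to a persist property when the app does
+    // not set it; the defaults can be overridden for testing, for example:
+    //     adb shell setprop persist.camera.zsl.backlookcnt 4
+    //     adb shell setprop persist.camera.zsl.queuedepth 4
+    // (4 is only an illustration; the built-in defaults below are "2").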
+    const char *str = params.get(KEY_QC_ZSL_BURST_INTERVAL);
+    if (str != NULL) {
+        set(KEY_QC_ZSL_BURST_INTERVAL, str);
+    } else {
+        memset(prop, 0, sizeof(prop));
+        property_get("persist.camera.zsl.interval", prop, "1");
+        set(KEY_QC_ZSL_BURST_INTERVAL, prop);
+        CDBG_HIGH("%s: [ZSL Retro] burst interval: %s", __func__, prop);
+    }
+
+    str = params.get(KEY_QC_ZSL_BURST_LOOKBACK);
+    if (str != NULL) {
+        set(KEY_QC_ZSL_BURST_LOOKBACK, str);
+    } else {
+        memset(prop, 0, sizeof(prop));
+        property_get("persist.camera.zsl.backlookcnt", prop, "2");
+        set(KEY_QC_ZSL_BURST_LOOKBACK, prop);
+        CDBG_HIGH("%s: [ZSL Retro] look back count: %s", __func__, prop);
+    }
+
+    str = params.get(KEY_QC_ZSL_QUEUE_DEPTH);
+    if (str != NULL) {
+        set(KEY_QC_ZSL_QUEUE_DEPTH, str);
+    } else {
+        memset(prop, 0, sizeof(prop));
+        property_get("persist.camera.zsl.queuedepth", prop, "2");
+        set(KEY_QC_ZSL_QUEUE_DEPTH, prop);
+        CDBG_HIGH("%s: [ZSL Retro] queue depth: %s", __func__, prop);
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setFlip
+ *
+ * DESCRIPTION: set preview/ video/ picture flip mode from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setFlip(const QCameraParameters& params)
+{
+    if ((m_pCapability->qcom_supported_feature_mask & CAM_QCOM_FEATURE_FLIP) == 0) {
+        CDBG_HIGH("%s: flip is not supported.", __func__);
+        return NO_ERROR;
+    }
+
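+    // Preview, video and snapshot flip are tracked independently; each block below
+    // only records the new value and raises its *FlipChanged flag, and the flip is
+    // applied later by the code that consumes those flags when streams are set up.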
+    // check preview flip setting
+    const char *str = params.get(KEY_QC_PREVIEW_FLIP);
+    const char *prev_val = get(KEY_QC_PREVIEW_FLIP);
+    if(str != NULL){
+        if (prev_val == NULL || strcmp(str, prev_val) != 0) {
+            int32_t value = lookupAttr(FLIP_MODES_MAP, PARAM_MAP_SIZE(FLIP_MODES_MAP), str);
+            if(value != NAME_NOT_FOUND){
+                set(KEY_QC_PREVIEW_FLIP, str);
+                m_bPreviewFlipChanged = true;
+            }
+        }
+    }
+
+    // check video flip setting
+    str = params.get(KEY_QC_VIDEO_FLIP);
+    prev_val = get(KEY_QC_VIDEO_FLIP);
+    if(str != NULL){
+        if (prev_val == NULL || strcmp(str, prev_val) != 0) {
+            int32_t value = lookupAttr(FLIP_MODES_MAP, PARAM_MAP_SIZE(FLIP_MODES_MAP), str);
+            if(value != NAME_NOT_FOUND){
+                set(KEY_QC_VIDEO_FLIP, str);
+                m_bVideoFlipChanged = true;
+            }
+        }
+    }
+
+    // check picture flip setting
+    str = params.get(KEY_QC_SNAPSHOT_PICTURE_FLIP);
+    prev_val = get(KEY_QC_SNAPSHOT_PICTURE_FLIP);
+    if(str != NULL){
+        if (prev_val == NULL || strcmp(str, prev_val) != 0) {
+            int32_t value = lookupAttr(FLIP_MODES_MAP, PARAM_MAP_SIZE(FLIP_MODES_MAP), str);
+            if(value != NAME_NOT_FOUND){
+                set(KEY_QC_SNAPSHOT_PICTURE_FLIP, str);
+                m_bSnapshotFlipChanged = true;
+            }
+        }
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setBurstNum
+ *
+ * DESCRIPTION: set burst number of snapshot
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setBurstNum(const QCameraParameters& params)
+{
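+    // Burst count precedence: advanced capture features force a single shot,
+    // otherwise the app-supplied KEY_QC_SNAPSHOT_BURST_NUM wins, then the
+    // persist.camera.snapshot.number property, and finally a default of 1.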
+    int nBurstNum = params.getInt(KEY_QC_SNAPSHOT_BURST_NUM);
+    if (isAdvCamFeaturesEnabled()) {
+        nBurstNum = 1;
+    }
+    if (nBurstNum <= 0) {
+        // if burst number is not set in parameters,
+        // read from sys prop
+        char prop[PROPERTY_VALUE_MAX];
+        memset(prop, 0, sizeof(prop));
+        property_get("persist.camera.snapshot.number", prop, "0");
+        nBurstNum = atoi(prop);
+        if (nBurstNum <= 0) {
+            nBurstNum = 1;
+        }
+    }
+    set(KEY_QC_SNAPSHOT_BURST_NUM, nBurstNum);
+    m_nBurstNum = (uint8_t)nBurstNum;
+    CDBG_HIGH("%s: [ZSL Retro] m_nBurstNum = %d", __func__, m_nBurstNum);
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_BURST_NUM, (uint32_t)nBurstNum)) {
+        return BAD_VALUE;
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setSnapshotFDReq
+ *
+ * DESCRIPTION: set requirement of Face Detection Metadata in Snapshot mode.
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setSnapshotFDReq(const QCameraParameters& params)
+{
+    char prop[PROPERTY_VALUE_MAX];
+    const char *str = params.get(KEY_QC_SNAPSHOT_FD_DATA);
+
+    if(str != NULL){
+        set(KEY_QC_SNAPSHOT_FD_DATA, str);
+    }else{
+        memset(prop, 0, sizeof(prop));
+        property_get("persist.camera.snapshot.fd", prop, "0");
+        set(KEY_QC_SNAPSHOT_FD_DATA, prop);
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setMobicat
+ *
+ * DESCRIPTION: set Mobicat on/off.
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setMobicat(const QCameraParameters& )
+{
+    char value [PROPERTY_VALUE_MAX];
+    property_get("persist.camera.mobicat", value, "0");
+    int32_t ret = NO_ERROR;
+    uint8_t enableMobi = (uint8_t)atoi(value);
+
+    if (enableMobi) {
+        tune_cmd_t tune_cmd;
+        tune_cmd.type = 2;
+        tune_cmd.module = 0;
+        tune_cmd.value = 1;
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_SET_VFE_COMMAND, tune_cmd)) {
+            return BAD_VALUE;
+        }
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_SET_PP_COMMAND, tune_cmd)) {
+            ret = BAD_VALUE;
+        }
+    }
+    m_MobiMask = enableMobi;
+
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : updateParameters
+ *
+ * DESCRIPTION: update parameters from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *   @needRestart : [output] whether preview needs to restart after the setting changes
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::updateParameters(QCameraParameters& params,
+        bool &needRestart)
+{
+    int32_t final_rc = NO_ERROR;
+    int32_t rc;
+    m_bNeedRestart = false;
+
+    if(initBatchUpdate(m_pParamBuf) < 0 ) {
+        ALOGE("%s:Failed to initialize group update table",__func__);
+        rc = BAD_TYPE;
+        goto UPDATE_PARAM_DONE;
+    }
+
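+    // Each setter below updates the key/value store and, where needed, appends the
+    // translated value to the m_pParamBuf batch; the batch is only handed to the
+    // backend when commitParameters() runs commitSetBatch(). A failure in any
+    // individual setter is remembered in final_rc but does not stop the sequence.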
+    if ((rc = setPreviewSize(params)))                  final_rc = rc;
+    if ((rc = setVideoSize(params)))                    final_rc = rc;
+    if ((rc = setPictureSize(params)))                  final_rc = rc;
+    if ((rc = setPreviewFormat(params)))                final_rc = rc;
+    if ((rc = setPictureFormat(params)))                final_rc = rc;
+    if ((rc = setJpegQuality(params)))                  final_rc = rc;
+    if ((rc = setOrientation(params)))                  final_rc = rc;
+    if ((rc = setRotation(params)))                     final_rc = rc;
+    if ((rc = setVideoRotation(params)))                final_rc = rc;
+    if ((rc = setNoDisplayMode(params)))                final_rc = rc;
+    if ((rc = setZslMode(params)))                      final_rc = rc;
+    if ((rc = setZslAttributes(params)))                final_rc = rc;
+    if ((rc = setCameraMode(params)))                   final_rc = rc;
+    if ((rc = setSceneSelectionMode(params)))           final_rc = rc;
+    if ((rc = setRecordingHint(params)))                final_rc = rc;
+    if ((rc = setRdiMode(params)))                      final_rc = rc;
+    if ((rc = setSecureMode(params)))                   final_rc = rc;
+    if ((rc = setPreviewFrameRate(params)))             final_rc = rc;
+    if ((rc = setPreviewFpsRange(params)))              final_rc = rc;
+    if ((rc = setAutoExposure(params)))                 final_rc = rc;
+    if ((rc = setEffect(params)))                       final_rc = rc;
+    if ((rc = setBrightness(params)))                   final_rc = rc;
+    if ((rc = setZoom(params)))                         final_rc = rc;
+    if ((rc = setSharpness(params)))                    final_rc = rc;
+    if ((rc = setSaturation(params)))                   final_rc = rc;
+    if ((rc = setContrast(params)))                     final_rc = rc;
+    if ((rc = setFocusMode(params)))                    final_rc = rc;
+    if ((rc = setISOValue(params)))                     final_rc = rc;
+    if ((rc = setContinuousISO(params)))                final_rc = rc;
+    if ((rc = setExposureTime(params)))                 final_rc = rc;
+    if ((rc = setSkinToneEnhancement(params)))          final_rc = rc;
+    if ((rc = setFlash(params)))                        final_rc = rc;
+    if ((rc = setAecLock(params)))                      final_rc = rc;
+    if ((rc = setAwbLock(params)))                      final_rc = rc;
+    if ((rc = setLensShadeValue(params)))               final_rc = rc;
+    if ((rc = setMCEValue(params)))                     final_rc = rc;
+    if ((rc = setDISValue(params)))                     final_rc = rc;
+    if ((rc = setAntibanding(params)))                  final_rc = rc;
+    if ((rc = setExposureCompensation(params)))         final_rc = rc;
+    if ((rc = setWhiteBalance(params)))                 final_rc = rc;
+    if ((rc = setHDRMode(params)))                      final_rc = rc;
+    if ((rc = setHDRNeed1x(params)))                    final_rc = rc;
+    if ((rc = setManualWhiteBalance(params)))           final_rc = rc;
+    if ((rc = setSceneMode(params)))                    final_rc = rc;
+    if ((rc = setFocusAreas(params)))                   final_rc = rc;
+    if ((rc = setFocusPosition(params)))                final_rc = rc;
+    if ((rc = setMeteringAreas(params)))                final_rc = rc;
+    if ((rc = setSelectableZoneAf(params)))             final_rc = rc;
+    if ((rc = setRedeyeReduction(params)))              final_rc = rc;
+    if ((rc = setAEBracket(params)))                    final_rc = rc;
+    if ((rc = setAutoHDR(params)))                      final_rc = rc;
+    if ((rc = setGpsLocation(params)))                  final_rc = rc;
+    if ((rc = setWaveletDenoise(params)))               final_rc = rc;
+    if ((rc = setFaceRecognition(params)))              final_rc = rc;
+    if ((rc = setFlip(params)))                         final_rc = rc;
+    if ((rc = setVideoHDR(params)))                     final_rc = rc;
+    if ((rc = setVtEnable(params)))                     final_rc = rc;
+    if ((rc = setAFBracket(params)))                    final_rc = rc;
+    if ((rc = setReFocus(params)))                      final_rc = rc;
+    if ((rc = setChromaFlash(params)))                  final_rc = rc;
+    if ((rc = setTruePortrait(params)))                 final_rc = rc;
+    if ((rc = setOptiZoom(params)))                     final_rc = rc;
+    if ((rc = setBurstNum(params)))                     final_rc = rc;
+    if ((rc = setBurstLEDOnPeriod(params)))             final_rc = rc;
+    if ((rc = setRetroActiveBurstNum(params)))          final_rc = rc;
+    if ((rc = setSnapshotFDReq(params)))                final_rc = rc;
+    if ((rc = setTintlessValue(params)))                final_rc = rc;
+    if ((rc = setCDSMode(params)))                      final_rc = rc;
+    if ((rc = setTemporalDenoise(params)))              final_rc = rc;
+
+    // update live snapshot size after all other parameters are set
+    if ((rc = setLiveSnapshotSize(params)))             final_rc = rc;
+    if ((rc = setJpegThumbnailSize(params)))            final_rc = rc;
+    if ((rc = setStatsDebugMask()))                     final_rc = rc;
+    if ((rc = setPAAF()))                               final_rc = rc;
+    if ((rc = setMobicat(params)))                      final_rc = rc;
+    if ((rc = setSeeMore(params)))                      final_rc = rc;
+    if ((rc = setStillMore(params)))                    final_rc = rc;
+
+    if ((rc = updateFlash(false)))                      final_rc = rc;
+
+UPDATE_PARAM_DONE:
+    needRestart = m_bNeedRestart;
+    return final_rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : commitParameters
+ *
+ * DESCRIPTION: commit parameter changes to backend
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::commitParameters()
+{
+    return commitSetBatch();
+}
+
+/*===========================================================================
+ * FUNCTION   : initDefaultParameters
+ *
+ * DESCRIPTION: initialize default parameters for the first time
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::initDefaultParameters()
+{
+    if(initBatchUpdate(m_pParamBuf) < 0 ) {
+        ALOGE("%s:Failed to initialize group update table", __func__);
+        return BAD_TYPE;
+    }
+    int32_t hal_version = CAM_HAL_V1;
+    ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_HAL_VERSION, hal_version);
+
+    /*************************Initialize Values******************************/
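+    // Defaults below are written both to the CameraParameters string map (via
+    // set()) and, where applicable, to the backend parameter batch; the batch
+    // is committed once at the end of this function via commitParameters().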
+    // Set read only parameters from camera capability
+    set(KEY_SMOOTH_ZOOM_SUPPORTED,
+        m_pCapability->smooth_zoom_supported? VALUE_TRUE : VALUE_FALSE);
+    set(KEY_ZOOM_SUPPORTED,
+        m_pCapability->zoom_supported? VALUE_TRUE : VALUE_FALSE);
+    set(KEY_VIDEO_SNAPSHOT_SUPPORTED,
+        m_pCapability->video_snapshot_supported? VALUE_TRUE : VALUE_FALSE);
+    set(KEY_VIDEO_STABILIZATION_SUPPORTED,
+        m_pCapability->video_stablization_supported? VALUE_TRUE : VALUE_FALSE);
+    set(KEY_AUTO_EXPOSURE_LOCK_SUPPORTED,
+        m_pCapability->auto_exposure_lock_supported? VALUE_TRUE : VALUE_FALSE);
+    set(KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED,
+        m_pCapability->auto_wb_lock_supported? VALUE_TRUE : VALUE_FALSE);
+    set(KEY_QC_SUPPORTED_CAMERA_FEATURES,
+            (int)m_pCapability->qcom_supported_feature_mask);
+    set(KEY_MAX_NUM_DETECTED_FACES_HW, m_pCapability->max_num_roi);
+    set(KEY_MAX_NUM_DETECTED_FACES_SW, m_pCapability->max_num_roi);
+    set(KEY_QC_MAX_NUM_REQUESTED_FACES, m_pCapability->max_num_roi);
+    // Set focal length, horizontal view angle, and vertical view angle
+    setFloat(KEY_FOCAL_LENGTH, m_pCapability->focal_length);
+    setFloat(KEY_HORIZONTAL_VIEW_ANGLE, m_pCapability->hor_view_angle);
+    setFloat(KEY_VERTICAL_VIEW_ANGLE, m_pCapability->ver_view_angle);
+    set(QCameraParameters::KEY_FOCUS_DISTANCES, "Infinity,Infinity,Infinity");
+    set(KEY_QC_AUTO_HDR_SUPPORTED,
+        (m_pCapability->auto_hdr_supported)? VALUE_TRUE : VALUE_FALSE);
+    // Set supported preview sizes
+    if (m_pCapability->preview_sizes_tbl_cnt > 0 &&
+        m_pCapability->preview_sizes_tbl_cnt <= MAX_SIZES_CNT) {
+        String8 previewSizeValues = createSizesString(
+                m_pCapability->preview_sizes_tbl, m_pCapability->preview_sizes_tbl_cnt);
+        set(KEY_SUPPORTED_PREVIEW_SIZES, previewSizeValues.string());
+        CDBG_HIGH("%s: supported preview sizes: %s", __func__, previewSizeValues.string());
+        // Set default preview size
+        CameraParameters::setPreviewSize(m_pCapability->preview_sizes_tbl[0].width,
+                                         m_pCapability->preview_sizes_tbl[0].height);
+    } else {
+        ALOGE("%s: supported preview sizes cnt is 0 or exceeds max!!!", __func__);
+    }
+
+    // Set supported video sizes
+    if (m_pCapability->video_sizes_tbl_cnt > 0 &&
+        m_pCapability->video_sizes_tbl_cnt <= MAX_SIZES_CNT) {
+        String8 videoSizeValues = createSizesString(
+                m_pCapability->video_sizes_tbl, m_pCapability->video_sizes_tbl_cnt);
+        set(KEY_SUPPORTED_VIDEO_SIZES, videoSizeValues.string());
+        CDBG_HIGH("%s: supported video sizes: %s", __func__, videoSizeValues.string());
+        // Set default video size
+        CameraParameters::setVideoSize(m_pCapability->video_sizes_tbl[0].width,
+                                       m_pCapability->video_sizes_tbl[0].height);
+
+        //Set preferred Preview size for video
+        String8 vSize = createSizesString(&m_pCapability->preview_sizes_tbl[0], 1);
+        set(KEY_PREFERRED_PREVIEW_SIZE_FOR_VIDEO, vSize.string());
+    } else {
+        ALOGE("%s: supported video sizes cnt is 0 or exceeds max!!!", __func__);
+    }
+
+    // Set supported picture sizes
+    if (m_pCapability->picture_sizes_tbl_cnt > 0 &&
+        m_pCapability->picture_sizes_tbl_cnt <= MAX_SIZES_CNT) {
+        String8 pictureSizeValues = createSizesString(
+                m_pCapability->picture_sizes_tbl, m_pCapability->picture_sizes_tbl_cnt);
+        set(KEY_SUPPORTED_PICTURE_SIZES, pictureSizeValues.string());
+        CDBG_HIGH("%s: supported pic sizes: %s", __func__, pictureSizeValues.string());
+        // Set default picture size to the smallest resolution
+        CameraParameters::setPictureSize(
+           m_pCapability->picture_sizes_tbl[m_pCapability->picture_sizes_tbl_cnt-1].width,
+           m_pCapability->picture_sizes_tbl[m_pCapability->picture_sizes_tbl_cnt-1].height);
+    } else {
+        ALOGE("%s: supported picture sizes cnt is 0 or exceeds max!!!", __func__);
+    }
+
+    // Check whether picture scaling should be enabled
+    if (m_pCapability->scale_picture_sizes_cnt > 0 &&
+        m_pCapability->scale_picture_sizes_cnt <= MAX_SCALE_SIZES_CNT){
+        // Get scale sizes, enable scaling, and rebuild the picture size table with the scale sizes
+        m_reprocScaleParam.setScaleEnable(true);
+        int rc_s = m_reprocScaleParam.setScaleSizeTbl(
+            m_pCapability->scale_picture_sizes_cnt, m_pCapability->scale_picture_sizes,
+            m_pCapability->picture_sizes_tbl_cnt, m_pCapability->picture_sizes_tbl);
+        if(rc_s == NO_ERROR){
+            cam_dimension_t *totalSizeTbl = m_reprocScaleParam.getTotalSizeTbl();
+            size_t totalSizeCnt = m_reprocScaleParam.getTotalSizeTblCnt();
+            String8 pictureSizeValues = createSizesString(totalSizeTbl, totalSizeCnt);
+            set(KEY_SUPPORTED_PICTURE_SIZES, pictureSizeValues.string());
+            CDBG_HIGH("%s: scaled supported pic sizes: %s", __func__, pictureSizeValues.string());
+        }else{
+            m_reprocScaleParam.setScaleEnable(false);
+            ALOGE("%s: reset scaled picture size table failed.", __func__);
+        }
+    }else{
+        m_reprocScaleParam.setScaleEnable(false);
+    }
+
+    // Set supported thumbnail sizes
+    String8 thumbnailSizeValues = createSizesString(
+            THUMBNAIL_SIZES_MAP,
+            PARAM_MAP_SIZE(THUMBNAIL_SIZES_MAP));
+    set(KEY_SUPPORTED_JPEG_THUMBNAIL_SIZES, thumbnailSizeValues.string());
+    // Set default thumbnail size
+    set(KEY_JPEG_THUMBNAIL_WIDTH, THUMBNAIL_SIZES_MAP[0].width);
+    set(KEY_JPEG_THUMBNAIL_HEIGHT, THUMBNAIL_SIZES_MAP[0].height);
+
+    // Set supported livesnapshot sizes
+    if (m_pCapability->livesnapshot_sizes_tbl_cnt > 0 &&
+        m_pCapability->livesnapshot_sizes_tbl_cnt <= MAX_SIZES_CNT) {
+        String8 liveSnapshotSizeValues = createSizesString(
+                m_pCapability->livesnapshot_sizes_tbl,
+                m_pCapability->livesnapshot_sizes_tbl_cnt);
+        set(KEY_QC_SUPPORTED_LIVESNAPSHOT_SIZES, liveSnapshotSizeValues.string());
+        CDBG("%s: supported live snapshot sizes: %s", __func__, liveSnapshotSizeValues.string());
+        m_LiveSnapshotSize =
+            m_pCapability->livesnapshot_sizes_tbl[m_pCapability->livesnapshot_sizes_tbl_cnt-1];
+    }
+
+    // Set supported preview formats
+    String8 previewFormatValues = createValuesString(
+            m_pCapability->supported_preview_fmts,
+            m_pCapability->supported_preview_fmt_cnt,
+            PREVIEW_FORMATS_MAP,
+            PARAM_MAP_SIZE(PREVIEW_FORMATS_MAP));
+    set(KEY_SUPPORTED_PREVIEW_FORMATS, previewFormatValues.string());
+    // Set default preview format
+    CameraParameters::setPreviewFormat(PIXEL_FORMAT_YUV420SP);
+
+    // Set default Video Format
+    set(KEY_VIDEO_FRAME_FORMAT, PIXEL_FORMAT_YUV420SP);
+
+    // Set supported picture formats
+    String8 pictureTypeValues(PIXEL_FORMAT_JPEG);
+    String8 str = createValuesString(
+            m_pCapability->supported_raw_fmts,
+            m_pCapability->supported_raw_fmt_cnt,
+            PICTURE_TYPES_MAP,
+            PARAM_MAP_SIZE(PICTURE_TYPES_MAP));
+    if (str.string() != NULL) {
+        pictureTypeValues.append(",");
+        pictureTypeValues.append(str);
+    }
+
+    set(KEY_SUPPORTED_PICTURE_FORMATS, pictureTypeValues.string());
+    // Set default picture Format
+    CameraParameters::setPictureFormat(PIXEL_FORMAT_JPEG);
+    // Set raw image size
+    char raw_size_str[32];
+    snprintf(raw_size_str, sizeof(raw_size_str), "%dx%d",
+             m_pCapability->raw_dim[0].width, m_pCapability->raw_dim[0].height);
+    set(KEY_QC_RAW_PICUTRE_SIZE, raw_size_str);
+    CDBG("%s: KEY_QC_RAW_PICUTRE_SIZE: w: %d, h: %d ", __func__,
+       m_pCapability->raw_dim[0].width, m_pCapability->raw_dim[0].height);
+
+    //set default jpeg quality and thumbnail quality
+    set(KEY_JPEG_QUALITY, 85);
+    set(KEY_JPEG_THUMBNAIL_QUALITY, 85);
+
+    // Set FPS ranges
+    if (m_pCapability->fps_ranges_tbl_cnt > 0 &&
+        m_pCapability->fps_ranges_tbl_cnt <= MAX_SIZES_CNT) {
+        int default_fps_index = 0;
+        String8 fpsRangeValues = createFpsRangeString(m_pCapability->fps_ranges_tbl,
+                                                      m_pCapability->fps_ranges_tbl_cnt,
+                                                      default_fps_index);
+        set(KEY_SUPPORTED_PREVIEW_FPS_RANGE, fpsRangeValues.string());
+
+        int min_fps =
+            int(m_pCapability->fps_ranges_tbl[default_fps_index].min_fps * 1000);
+        int max_fps =
+            int(m_pCapability->fps_ranges_tbl[default_fps_index].max_fps * 1000);
+        m_default_fps_range = m_pCapability->fps_ranges_tbl[default_fps_index];
+        //Set video fps same as preview fps
+        setPreviewFpsRange(min_fps, max_fps, min_fps, max_fps);
+
+        // Set legacy preview fps
+        String8 fpsValues = createFpsString(m_pCapability->fps_ranges_tbl[default_fps_index]);
+        set(KEY_SUPPORTED_PREVIEW_FRAME_RATES, fpsValues.string());
+        CDBG_HIGH("%s: supported fps rates: %s", __func__, fpsValues.string());
+        CameraParameters::setPreviewFrameRate(int(m_pCapability->fps_ranges_tbl[default_fps_index].max_fps));
+    } else {
+        ALOGE("%s: supported fps ranges cnt is 0 or exceeds max!!!", __func__);
+    }
+
+    // Set supported focus modes
+    if (m_pCapability->supported_focus_modes_cnt > 0) {
+        String8 focusModeValues = createValuesString(
+                m_pCapability->supported_focus_modes,
+                m_pCapability->supported_focus_modes_cnt,
+                FOCUS_MODES_MAP,
+                PARAM_MAP_SIZE(FOCUS_MODES_MAP));
+        set(KEY_SUPPORTED_FOCUS_MODES, focusModeValues);
+
+        // Set default focus mode and update corresponding parameter buf
+        const char *focusMode = lookupNameByValue(FOCUS_MODES_MAP,
+                PARAM_MAP_SIZE(FOCUS_MODES_MAP),
+                m_pCapability->supported_focus_modes[0]);
+        if (focusMode != NULL) {
+            setFocusMode(focusMode);
+        } else {
+            setFocusMode(FOCUS_MODE_FIXED);
+        }
+    } else {
+        ALOGE("%s: supported focus modes cnt is 0!!!", __func__);
+    }
+
+    // Set focus areas
+    if (m_pCapability->max_num_focus_areas > MAX_ROI) {
+        m_pCapability->max_num_focus_areas = MAX_ROI;
+    }
+    set(KEY_MAX_NUM_FOCUS_AREAS, m_pCapability->max_num_focus_areas);
+    if (m_pCapability->max_num_focus_areas > 0) {
+        setFocusAreas(DEFAULT_CAMERA_AREA);
+    }
+
+    // Set metering areas
+    if (m_pCapability->max_num_metering_areas > MAX_ROI) {
+        m_pCapability->max_num_metering_areas = MAX_ROI;
+    }
+    set(KEY_MAX_NUM_METERING_AREAS, m_pCapability->max_num_metering_areas);
+    if (m_pCapability->max_num_metering_areas > 0) {
+        setMeteringAreas(DEFAULT_CAMERA_AREA);
+    }
+
+    // Set manual focus position ranges (hardcoded here; ideally they would come from m_pCapability)
+    m_pCapability->min_focus_pos[CAM_MANUAL_FOCUS_MODE_INDEX] = 0;
+    m_pCapability->max_focus_pos[CAM_MANUAL_FOCUS_MODE_INDEX] = 1023;
+    set(KEY_QC_MIN_FOCUS_POS_INDEX,
+            (int) m_pCapability->min_focus_pos[CAM_MANUAL_FOCUS_MODE_INDEX]);
+    set(KEY_QC_MAX_FOCUS_POS_INDEX,
+            (int) m_pCapability->max_focus_pos[CAM_MANUAL_FOCUS_MODE_INDEX]);
+
+    m_pCapability->min_focus_pos[CAM_MANUAL_FOCUS_MODE_DAC_CODE] = 0;
+    m_pCapability->max_focus_pos[CAM_MANUAL_FOCUS_MODE_DAC_CODE] = 1023;
+    set(KEY_QC_MIN_FOCUS_POS_DAC,
+            (int) m_pCapability->min_focus_pos[CAM_MANUAL_FOCUS_MODE_DAC_CODE]);
+    set(KEY_QC_MAX_FOCUS_POS_DAC,
+            (int) m_pCapability->max_focus_pos[CAM_MANUAL_FOCUS_MODE_DAC_CODE]);
+
+    m_pCapability->min_focus_pos[CAM_MANUAL_FOCUS_MODE_RATIO] = 0;
+    m_pCapability->max_focus_pos[CAM_MANUAL_FOCUS_MODE_RATIO] = 100;
+    set(KEY_QC_MIN_FOCUS_POS_RATIO,
+            (int) m_pCapability->min_focus_pos[CAM_MANUAL_FOCUS_MODE_RATIO]);
+    set(KEY_QC_MAX_FOCUS_POS_RATIO,
+            (int) m_pCapability->max_focus_pos[CAM_MANUAL_FOCUS_MODE_RATIO]);
+
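+    // The diopter range below is derived from the minimum focus distance
+    // (presumably reported in cm, hence max diopters = 100 / min distance);
+    // the index, DAC and ratio ranges above are hardcoded defaults.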
+    m_pCapability->min_focus_pos[CAM_MANUAL_FOCUS_MODE_DIOPTER] = 0;
+    if (m_pCapability->min_focus_distance > 0) {
+        m_pCapability->max_focus_pos[CAM_MANUAL_FOCUS_MODE_DIOPTER] =
+                100.0f / m_pCapability->min_focus_distance;
+    } else {
+        m_pCapability->max_focus_pos[CAM_MANUAL_FOCUS_MODE_DIOPTER] = 0;
+    }
+    setFloat(KEY_QC_MIN_FOCUS_POS_DIOPTER,
+            m_pCapability->min_focus_pos[CAM_MANUAL_FOCUS_MODE_DIOPTER]);
+    setFloat(KEY_QC_MAX_FOCUS_POS_DIOPTER,
+            m_pCapability->max_focus_pos[CAM_MANUAL_FOCUS_MODE_DIOPTER]);
+
+    //set supported manual focus modes
+    String8 manualFocusModes(VALUE_OFF);
+    if (m_pCapability->supported_focus_modes_cnt > 1 &&
+        m_pCapability->min_focus_distance > 0) {
+        manualFocusModes.append(",");
+        manualFocusModes.append(KEY_QC_FOCUS_SCALE_MODE);
+        manualFocusModes.append(",");
+        manualFocusModes.append(KEY_QC_FOCUS_DIOPTER_MODE);
+    }
+    set(KEY_QC_SUPPORTED_MANUAL_FOCUS_MODES, manualFocusModes.string());
+
+    // Set Saturation
+    set(KEY_QC_MIN_SATURATION, m_pCapability->saturation_ctrl.min_value);
+    set(KEY_QC_MAX_SATURATION, m_pCapability->saturation_ctrl.max_value);
+    set(KEY_QC_SATURATION_STEP, m_pCapability->saturation_ctrl.step);
+    setSaturation(m_pCapability->saturation_ctrl.def_value);
+
+    // Set Sharpness
+    set(KEY_QC_MIN_SHARPNESS, m_pCapability->sharpness_ctrl.min_value);
+    set(KEY_QC_MAX_SHARPNESS, m_pCapability->sharpness_ctrl.max_value);
+    set(KEY_QC_SHARPNESS_STEP, m_pCapability->sharpness_ctrl.step);
+    setSharpness(m_pCapability->sharpness_ctrl.def_value);
+
+    // Set Contrast
+    set(KEY_QC_MIN_CONTRAST, m_pCapability->contrast_ctrl.min_value);
+    set(KEY_QC_MAX_CONTRAST, m_pCapability->contrast_ctrl.max_value);
+    set(KEY_QC_CONTRAST_STEP, m_pCapability->contrast_ctrl.step);
+    setContrast(m_pCapability->contrast_ctrl.def_value);
+
+    // Set SCE factor
+    set(KEY_QC_MIN_SCE_FACTOR, m_pCapability->sce_ctrl.min_value); // -100
+    set(KEY_QC_MAX_SCE_FACTOR, m_pCapability->sce_ctrl.max_value); // 100
+    set(KEY_QC_SCE_FACTOR_STEP, m_pCapability->sce_ctrl.step);     // 10
+    setSkinToneEnhancement(m_pCapability->sce_ctrl.def_value);     // 0
+
+    // Set Brightness
+    set(KEY_QC_MIN_BRIGHTNESS, m_pCapability->brightness_ctrl.min_value); // 0
+    set(KEY_QC_MAX_BRIGHTNESS, m_pCapability->brightness_ctrl.max_value); // 6
+    set(KEY_QC_BRIGHTNESS_STEP, m_pCapability->brightness_ctrl.step);     // 1
+    setBrightness(m_pCapability->brightness_ctrl.def_value);
+
+    // Set Auto exposure
+    String8 autoExposureValues = createValuesString(
+            m_pCapability->supported_aec_modes,
+            m_pCapability->supported_aec_modes_cnt,
+            AUTO_EXPOSURE_MAP,
+            PARAM_MAP_SIZE(AUTO_EXPOSURE_MAP));
+    set(KEY_QC_SUPPORTED_AUTO_EXPOSURE, autoExposureValues.string());
+    setAutoExposure(AUTO_EXPOSURE_FRAME_AVG);
+
+    // Set Exposure Compensation
+    set(KEY_MAX_EXPOSURE_COMPENSATION, m_pCapability->exposure_compensation_max); // 12
+    set(KEY_MIN_EXPOSURE_COMPENSATION, m_pCapability->exposure_compensation_min); // -12
+    setFloat(KEY_EXPOSURE_COMPENSATION_STEP, m_pCapability->exposure_compensation_step); // 1/6
+    setExposureCompensation(m_pCapability->exposure_compensation_default); // 0
+
+    // Set Antibanding
+    String8 antibandingValues = createValuesString(
+            m_pCapability->supported_antibandings,
+            m_pCapability->supported_antibandings_cnt,
+            ANTIBANDING_MODES_MAP,
+            PARAM_MAP_SIZE(ANTIBANDING_MODES_MAP));
+    set(KEY_SUPPORTED_ANTIBANDING, antibandingValues);
+    setAntibanding(ANTIBANDING_OFF);
+
+    // Set Effect
+    String8 effectValues = createValuesString(
+            m_pCapability->supported_effects,
+            m_pCapability->supported_effects_cnt,
+            EFFECT_MODES_MAP,
+            PARAM_MAP_SIZE(EFFECT_MODES_MAP));
+    set(KEY_SUPPORTED_EFFECTS, effectValues);
+    setEffect(EFFECT_NONE);
+
+    // Set WhiteBalance
+    String8 whitebalanceValues = createValuesString(
+            m_pCapability->supported_white_balances,
+            m_pCapability->supported_white_balances_cnt,
+            WHITE_BALANCE_MODES_MAP,
+            PARAM_MAP_SIZE(WHITE_BALANCE_MODES_MAP));
+    set(KEY_SUPPORTED_WHITE_BALANCE, whitebalanceValues);
+    setWhiteBalance(WHITE_BALANCE_AUTO);
+
+    // Set supported WB CCT range (hardcoded here; ideally reported via m_pCapability)
+    m_pCapability->min_wb_cct = 2000;
+    m_pCapability->max_wb_cct = 8000;
+    set(KEY_QC_MIN_WB_CCT, m_pCapability->min_wb_cct);
+    set(KEY_QC_MAX_WB_CCT, m_pCapability->max_wb_cct);
+
+    // Set supported WB RGB gain range; ideally this should come from
+    // m_pCapability, but hardcode for now.
+    m_pCapability->min_wb_gain = 1.0;
+    m_pCapability->max_wb_gain = 4.0;
+    setFloat(KEY_QC_MIN_WB_GAIN, m_pCapability->min_wb_gain);
+    setFloat(KEY_QC_MAX_WB_GAIN, m_pCapability->max_wb_gain);
+
+    //set supported manual wb modes
+    String8 manualWBModes(VALUE_OFF);
+    if(m_pCapability->sensor_type.sens_type != CAM_SENSOR_YUV) {
+        manualWBModes.append(",");
+        manualWBModes.append(KEY_QC_WB_CCT_MODE);
+        manualWBModes.append(",");
+        manualWBModes.append(KEY_QC_WB_GAIN_MODE);
+    }
+    set(KEY_QC_SUPPORTED_MANUAL_WB_MODES, manualWBModes.string());
+
+    // Set Flash mode
+    if(m_pCapability->supported_flash_modes_cnt > 0) {
+       String8 flashValues = createValuesString(
+               m_pCapability->supported_flash_modes,
+               m_pCapability->supported_flash_modes_cnt,
+               FLASH_MODES_MAP,
+               PARAM_MAP_SIZE(FLASH_MODES_MAP));
+       set(KEY_SUPPORTED_FLASH_MODES, flashValues);
+       setFlash(FLASH_MODE_OFF);
+    } else {
+        ALOGE("%s: supported flash modes cnt is 0!!!", __func__);
+    }
+
+    // Set Scene Mode
+    String8 sceneModeValues = createValuesString(
+            m_pCapability->supported_scene_modes,
+            m_pCapability->supported_scene_modes_cnt,
+            SCENE_MODES_MAP,
+            PARAM_MAP_SIZE(SCENE_MODES_MAP));
+    set(KEY_SUPPORTED_SCENE_MODES, sceneModeValues);
+    setSceneMode(SCENE_MODE_AUTO);
+
+    // Set CDS Mode
+    String8 cdsModeValues = createValuesStringFromMap(
+            CDS_MODES_MAP,
+            PARAM_MAP_SIZE(CDS_MODES_MAP));
+    set(KEY_QC_SUPPORTED_CDS_MODES, cdsModeValues);
+
+    // Set video CDS Mode
+    String8 videoCdsModeValues = createValuesStringFromMap(
+            CDS_MODES_MAP,
+            PARAM_MAP_SIZE(CDS_MODES_MAP));
+    set(KEY_QC_SUPPORTED_VIDEO_CDS_MODES, videoCdsModeValues);
+
+    // Set TNR Mode
+    String8 tnrModeValues = createValuesStringFromMap(
+            ON_OFF_MODES_MAP,
+            PARAM_MAP_SIZE(ON_OFF_MODES_MAP));
+    set(KEY_QC_SUPPORTED_TNR_MODES, tnrModeValues);
+
+    // Set video TNR Mode
+    String8 videoTnrModeValues = createValuesStringFromMap(
+            ON_OFF_MODES_MAP,
+            PARAM_MAP_SIZE(ON_OFF_MODES_MAP));
+    set(KEY_QC_SUPPORTED_VIDEO_TNR_MODES, videoTnrModeValues);
+
+    // Set ISO Mode
+    String8 isoValues = createValuesString(
+            m_pCapability->supported_iso_modes,
+            m_pCapability->supported_iso_modes_cnt,
+            ISO_MODES_MAP,
+            PARAM_MAP_SIZE(ISO_MODES_MAP));
+    set(KEY_QC_SUPPORTED_ISO_MODES, isoValues);
+    setISOValue(ISO_AUTO);
+
+    // Set exposure time
+    String8 manualExpModes(VALUE_OFF);
+    bool expTimeSupported = false;
+    bool manualISOSupported = false;
+    // Capability values are in nanoseconds; convert to milliseconds for upper layers
+    char expTimeStr[20];
+    double min_exp_time = (double) m_pCapability->exposure_time_range[0] / 1000000.0;
+    double max_exp_time = (double) m_pCapability->exposure_time_range[1] / 1000000.0;
+    snprintf(expTimeStr, sizeof(expTimeStr), "%f", min_exp_time);
+    set(KEY_QC_MIN_EXPOSURE_TIME, expTimeStr);
+    snprintf(expTimeStr, sizeof(expTimeStr), "%f", max_exp_time);
+    set(KEY_QC_MAX_EXPOSURE_TIME, expTimeStr);
+    if ((min_exp_time > 0) && (max_exp_time > min_exp_time)) {
+        manualExpModes.append(",");
+        manualExpModes.append(KEY_QC_EXP_TIME_PRIORITY);
+        expTimeSupported = true;
+    }
+    CDBG_HIGH("%s, Exposure time min %f ms, max %f ms", __func__,
+            min_exp_time, max_exp_time);
+
+    // Set iso
+    set(KEY_QC_MIN_ISO, m_pCapability->sensitivity_range.min_sensitivity);
+    set(KEY_QC_MAX_ISO, m_pCapability->sensitivity_range.max_sensitivity);
+    CDBG_HIGH("%s, ISO min %d, max %d", __func__,
+            m_pCapability->sensitivity_range.min_sensitivity,
+            m_pCapability->sensitivity_range.max_sensitivity);
+    if ((m_pCapability->sensitivity_range.min_sensitivity > 0) &&
+            (m_pCapability->sensitivity_range.max_sensitivity >
+                    m_pCapability->sensitivity_range.min_sensitivity)) {
+        manualExpModes.append(",");
+        manualExpModes.append(KEY_QC_ISO_PRIORITY);
+        manualISOSupported = true;
+    }
+    if (expTimeSupported && manualISOSupported) {
+        manualExpModes.append(",");
+        manualExpModes.append(KEY_QC_USER_SETTING);
+    }
+    //finally set supported manual exposure modes
+    set(KEY_QC_SUPPORTED_MANUAL_EXPOSURE_MODES, manualExpModes.string());
+
+    // Set HFR
+    String8 hfrValues = createHfrValuesString(
+            m_pCapability->hfr_tbl,
+            m_pCapability->hfr_tbl_cnt,
+            HFR_MODES_MAP,
+            PARAM_MAP_SIZE(HFR_MODES_MAP));
+    set(KEY_QC_SUPPORTED_VIDEO_HIGH_FRAME_RATE_MODES, hfrValues.string());
+    set(KEY_QC_VIDEO_HIGH_SPEED_RECORDING, "off");
+    set(KEY_QC_VIDEO_HIGH_FRAME_RATE, "off");
+    String8 hfrSizeValues = createHfrSizesString(
+            m_pCapability->hfr_tbl,
+            m_pCapability->hfr_tbl_cnt);
+    set(KEY_QC_SUPPORTED_HFR_SIZES, hfrSizeValues.string());
+    CDBG("HFR values %s HFR Sizes = %s", hfrValues.string(), hfrSizeValues.string());
+    setHighFrameRate(CAM_HFR_MODE_OFF);
+
+    // Set Focus algorithms
+    String8 focusAlgoValues = createValuesString(
+            m_pCapability->supported_focus_algos,
+            m_pCapability->supported_focus_algos_cnt,
+            FOCUS_ALGO_MAP,
+            PARAM_MAP_SIZE(FOCUS_ALGO_MAP));
+    set(KEY_QC_SUPPORTED_FOCUS_ALGOS, focusAlgoValues);
+    setSelectableZoneAf(FOCUS_ALGO_AUTO);
+
+    // Set Zoom Ratios
+    if (m_pCapability->zoom_supported > 0) {
+        String8 zoomRatioValues = createZoomRatioValuesString(
+                m_pCapability->zoom_ratio_tbl,
+                m_pCapability->zoom_ratio_tbl_cnt);
+        set(KEY_ZOOM_RATIOS, zoomRatioValues);
+        set(KEY_MAX_ZOOM, (int)(m_pCapability->zoom_ratio_tbl_cnt - 1));
+        setZoom(0);
+    }
+
+    // Set Bracketing/HDR
+    char prop[PROPERTY_VALUE_MAX];
+    memset(prop, 0, sizeof(prop));
+    property_get("persist.capture.burst.exposures", prop, "");
+    if (strlen(prop) > 0) {
+        set(KEY_QC_CAPTURE_BURST_EXPOSURE, prop);
+    }
+    String8 bracketingValues = createValuesStringFromMap(
+            BRACKETING_MODES_MAP,
+            PARAM_MAP_SIZE(BRACKETING_MODES_MAP));
+    set(KEY_QC_SUPPORTED_AE_BRACKET_MODES, bracketingValues);
+    setAEBracket(AE_BRACKET_OFF);
+
+    //Set AF Bracketing.
+    for (size_t i = 0; i < m_pCapability->supported_focus_modes_cnt; i++) {
+        if ((CAM_FOCUS_MODE_AUTO == m_pCapability->supported_focus_modes[i]) &&
+                ((m_pCapability->qcom_supported_feature_mask &
+                        CAM_QCOM_FEATURE_UBIFOCUS) > 0)) {
+            String8 afBracketingValues = createValuesStringFromMap(
+                    AF_BRACKETING_MODES_MAP,
+                    PARAM_MAP_SIZE(AF_BRACKETING_MODES_MAP));
+            set(KEY_QC_SUPPORTED_AF_BRACKET_MODES, afBracketingValues);
+            setAFBracket(AF_BRACKET_OFF);
+            break;
+         }
+    }
+
+    //Set Refocus.
+    //Re-use ubifocus flag for now.
+    for (size_t i = 0; i < m_pCapability->supported_focus_modes_cnt; i++) {
+        if ((CAM_FOCUS_MODE_AUTO == m_pCapability->supported_focus_modes[i]) &&
+                (m_pCapability->qcom_supported_feature_mask &
+                    CAM_QCOM_FEATURE_REFOCUS) > 0) {
+            String8 reFocusValues = createValuesStringFromMap(
+                    RE_FOCUS_MODES_MAP,
+                    PARAM_MAP_SIZE(RE_FOCUS_MODES_MAP));
+            set(KEY_QC_SUPPORTED_RE_FOCUS_MODES, reFocusValues);
+            setReFocus(RE_FOCUS_OFF);
+        }
+    }
+
+    //Set Chroma Flash.
+    if ((m_pCapability->supported_flash_modes_cnt > 0) &&
+            (m_pCapability->qcom_supported_feature_mask &
+            CAM_QCOM_FEATURE_CHROMA_FLASH) > 0) {
+        String8 chromaFlashValues = createValuesStringFromMap(
+                CHROMA_FLASH_MODES_MAP,
+                PARAM_MAP_SIZE(CHROMA_FLASH_MODES_MAP));
+        set(KEY_QC_SUPPORTED_CHROMA_FLASH_MODES, chromaFlashValues);
+        setChromaFlash(CHROMA_FLASH_OFF);
+    }
+
+    //Set Opti Zoom.
+    if (m_pCapability->zoom_supported &&
+            (m_pCapability->qcom_supported_feature_mask &
+            CAM_QCOM_FEATURE_OPTIZOOM) > 0){
+        String8 optiZoomValues = createValuesStringFromMap(
+                OPTI_ZOOM_MODES_MAP,
+                PARAM_MAP_SIZE(OPTI_ZOOM_MODES_MAP));
+        set(KEY_QC_SUPPORTED_OPTI_ZOOM_MODES, optiZoomValues);
+        setOptiZoom(OPTI_ZOOM_OFF);
+    }
+
+    //Set HDR Type
+    uint32_t supported_hdr_modes = m_pCapability->qcom_supported_feature_mask &
+            (CAM_QCOM_FEATURE_SENSOR_HDR | CAM_QCOM_FEATURE_HDR);
+    if (supported_hdr_modes) {
+        if (CAM_QCOM_FEATURE_SENSOR_HDR == supported_hdr_modes) {
+            String8 hdrModeValues;
+            hdrModeValues.append(HDR_MODE_SENSOR);
+            set(KEY_QC_SUPPORTED_KEY_QC_HDR_MODES, hdrModeValues);
+            setHDRMode(HDR_MODE_SENSOR);
+        } else if (CAM_QCOM_FEATURE_HDR == supported_hdr_modes) {
+            String8 hdrModeValues;
+            hdrModeValues.append(HDR_MODE_MULTI_FRAME);
+            set(KEY_QC_SUPPORTED_KEY_QC_HDR_MODES, hdrModeValues);
+            setHDRMode(HDR_MODE_MULTI_FRAME);
+        } else {
+            String8 hdrModeValues = createValuesStringFromMap(
+                    HDR_MODES_MAP,
+                    PARAM_MAP_SIZE(HDR_MODES_MAP));
+            set(KEY_QC_SUPPORTED_KEY_QC_HDR_MODES, hdrModeValues);
+            setHDRMode(HDR_MODE_MULTI_FRAME);
+        }
+    }
+
+    //Set HDR need 1x
+    String8 hdrNeed1xValues;
+    if (!m_bHDRModeSensor) {
+        hdrNeed1xValues = createValuesStringFromMap(TRUE_FALSE_MODES_MAP,
+                PARAM_MAP_SIZE(TRUE_FALSE_MODES_MAP));
+        setHDRNeed1x(VALUE_TRUE);
+    } else {
+        hdrNeed1xValues.append(VALUE_FALSE);
+        setHDRNeed1x(VALUE_FALSE);
+    }
+    set(KEY_QC_SUPPORTED_HDR_NEED_1X, hdrNeed1xValues);
+
+    //Set True Portrait
+    if ((m_pCapability->qcom_supported_feature_mask & CAM_QCOM_FEATURE_TRUEPORTRAIT) > 0) {
+        String8 truePortraitValues = createValuesStringFromMap(
+                TRUE_PORTRAIT_MODES_MAP,
+                PARAM_MAP_SIZE(TRUE_PORTRAIT_MODES_MAP));
+        set(KEY_QC_SUPPORTED_TRUE_PORTRAIT_MODES, truePortraitValues);
+    }
+
+    // Set Denoise
+    if ((m_pCapability->qcom_supported_feature_mask & CAM_QCOM_FEATURE_DENOISE2D) > 0){
+    String8 denoiseValues = createValuesStringFromMap(
+        DENOISE_ON_OFF_MODES_MAP, PARAM_MAP_SIZE(DENOISE_ON_OFF_MODES_MAP));
+    set(KEY_QC_SUPPORTED_DENOISE, denoiseValues.string());
+#ifdef DEFAULT_DENOISE_MODE_ON
+    setWaveletDenoise(DENOISE_ON);
+#else
+    setWaveletDenoise(DENOISE_OFF);
+#endif
+    }
+
+    // Set feature enable/disable
+    String8 enableDisableValues = createValuesStringFromMap(
+            ENABLE_DISABLE_MODES_MAP, PARAM_MAP_SIZE(ENABLE_DISABLE_MODES_MAP));
+
+    // Set Lens Shading
+    set(KEY_QC_SUPPORTED_LENSSHADE_MODES, enableDisableValues);
+    setLensShadeValue(VALUE_ENABLE);
+    // Set MCE
+    set(KEY_QC_SUPPORTED_MEM_COLOR_ENHANCE_MODES, enableDisableValues);
+    setMCEValue(VALUE_ENABLE);
+
+    // Set DIS
+    set(KEY_QC_SUPPORTED_DIS_MODES, enableDisableValues);
+    setDISValue(VALUE_DISABLE);
+
+    // Set Histogram
+    set(KEY_QC_SUPPORTED_HISTOGRAM_MODES,
+        m_pCapability->histogram_supported ? enableDisableValues : "");
+    set(KEY_QC_HISTOGRAM, VALUE_DISABLE);
+
+    //Set Red Eye Reduction
+    set(KEY_QC_SUPPORTED_REDEYE_REDUCTION, enableDisableValues);
+    setRedeyeReduction(VALUE_DISABLE);
+
+    //Set SkinTone Enhancement
+    set(KEY_QC_SUPPORTED_SKIN_TONE_ENHANCEMENT_MODES, enableDisableValues);
+
+    // Set feature on/off
+    String8 onOffValues = createValuesStringFromMap(
+            ON_OFF_MODES_MAP, PARAM_MAP_SIZE(ON_OFF_MODES_MAP));
+
+    //Set See more (LLVD)
+    if (m_pCapability->qcom_supported_feature_mask &
+            CAM_QCOM_FEATURE_LLVD) {
+        set(KEY_QC_SUPPORTED_SEE_MORE_MODES, onOffValues);
+        setSeeMore(VALUE_OFF);
+    }
+
+    //Set Still more
+    if (m_pCapability->qcom_supported_feature_mask &
+            CAM_QCOM_FEATURE_STILLMORE) {
+        String8 stillMoreValues = createValuesStringFromMap(
+                STILL_MORE_MODES_MAP,
+                PARAM_MAP_SIZE(STILL_MORE_MODES_MAP));
+        set(KEY_QC_SUPPORTED_STILL_MORE_MODES, stillMoreValues);
+        setStillMore(STILL_MORE_OFF);
+    }
+
+    //Set Scene Detection
+    set(KEY_QC_SUPPORTED_SCENE_DETECT, onOffValues);
+    setSceneDetect(VALUE_OFF);
+    m_bHDREnabled = false;
+    m_bHDR1xFrameEnabled = true;
+
+    m_bHDRThumbnailProcessNeeded = false;
+    m_bHDR1xExtraBufferNeeded = true;
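+    // An extra 1x buffer is needed only when none of the bracketed exposure
+    // values is 0, i.e. when the bracket does not already include a normally
+    // exposed frame.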
+    for (uint32_t i=0; i<m_pCapability->hdr_bracketing_setting.num_frames; i++) {
+        if (0 == m_pCapability->hdr_bracketing_setting.exp_val.values[i]) {
+            m_bHDR1xExtraBufferNeeded = false;
+            break;
+        }
+    }
+
+    // Set HDR output scaling
+    char value[PROPERTY_VALUE_MAX];
+
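+    // m_bHDROutputCropEnabled is set only when persist.camera.hdr.outcrop is
+    // explicitly "enable" (presumably to crop the HDR result back to the
+    // requested output dimensions).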
+    property_get("persist.camera.hdr.outcrop", value, VALUE_DISABLE);
+    if (strncmp(VALUE_ENABLE, value, sizeof(VALUE_ENABLE))) {
+      m_bHDROutputCropEnabled = false;
+    } else {
+      m_bHDROutputCropEnabled = true;
+    }
+
+    //Set Face Detection
+    set(KEY_QC_SUPPORTED_FACE_DETECTION, onOffValues);
+    set(KEY_QC_FACE_DETECTION, VALUE_OFF);
+
+    //Set Face Recognition
+    //set(KEY_QC_SUPPORTED_FACE_RECOGNITION, onOffValues);
+    //set(KEY_QC_FACE_RECOGNITION, VALUE_OFF);
+
+    //Set ZSL
+    set(KEY_QC_SUPPORTED_ZSL_MODES, onOffValues);
+#ifdef DEFAULT_ZSL_MODE_ON
+    set(KEY_QC_ZSL, VALUE_ON);
+    m_bZslMode = true;
+#else
+    set(KEY_QC_ZSL, VALUE_OFF);
+    m_bZslMode = false;
+#endif
+
+    // Check if zsl mode property is enabled.
+    // If yes, force the camera to be in zsl mode
+    memset(value, 0x00, PROPERTY_VALUE_MAX);
+    property_get("persist.camera.zsl.mode", value, "0");
+    int32_t zsl_mode = atoi(value);
+    if(zsl_mode == 1) {
+        CDBG_HIGH("%s: %d: Forcing Camera to ZSL mode ", __func__, __LINE__);
+        set(KEY_QC_ZSL, VALUE_ON);
+        m_bForceZslMode = true;
+        m_bZslMode = true;
+    }
+    m_bZslMode_new = m_bZslMode;
+
+    set(KEY_QC_SCENE_SELECTION, VALUE_DISABLE);
+
+    // Rdi mode
+    set(KEY_QC_SUPPORTED_RDI_MODES, enableDisableValues);
+    setRdiMode(VALUE_DISABLE);
+
+    // Secure mode
+    set(KEY_QC_SUPPORTED_SECURE_MODES, enableDisableValues);
+    setSecureMode(VALUE_DISABLE);
+
+    //Set video HDR
+    if ((m_pCapability->qcom_supported_feature_mask & CAM_QCOM_FEATURE_VIDEO_HDR) > 0) {
+        set(KEY_QC_SUPPORTED_VIDEO_HDR_MODES, onOffValues);
+        set(KEY_QC_VIDEO_HDR, VALUE_OFF);
+    }
+
+    //Set HW Sensor Snapshot HDR
+    if ((m_pCapability->qcom_supported_feature_mask & CAM_QCOM_FEATURE_SENSOR_HDR)> 0) {
+        set(KEY_QC_SUPPORTED_SENSOR_HDR_MODES, onOffValues);
+        set(KEY_QC_SENSOR_HDR, VALUE_OFF);
+        m_bSensorHDREnabled = false;
+    }
+
+    // Set VT TimeStamp
+    set(KEY_QC_VT_ENABLE, VALUE_DISABLE);
+    //Set Touch AF/AEC
+    String8 touchValues = createValuesStringFromMap(
+            TOUCH_AF_AEC_MODES_MAP, PARAM_MAP_SIZE(TOUCH_AF_AEC_MODES_MAP));
+
+    set(KEY_QC_SUPPORTED_TOUCH_AF_AEC, touchValues);
+    set(KEY_QC_TOUCH_AF_AEC, TOUCH_AF_AEC_OFF);
+
+    //set flip mode
+    if ((m_pCapability->qcom_supported_feature_mask & CAM_QCOM_FEATURE_FLIP) > 0) {
+        String8 flipModes = createValuesStringFromMap(
+                FLIP_MODES_MAP, PARAM_MAP_SIZE(FLIP_MODES_MAP));
+        set(KEY_QC_SUPPORTED_FLIP_MODES, flipModes);
+        set(KEY_QC_PREVIEW_FLIP, FLIP_MODE_OFF);
+        set(KEY_QC_VIDEO_FLIP, FLIP_MODE_OFF);
+        set(KEY_QC_SNAPSHOT_PICTURE_FLIP, FLIP_MODE_OFF);
+    }
+
+    // Set default Auto Exposure lock value
+    setAecLock(VALUE_FALSE);
+
+    // Set default AWB_LOCK lock value
+    setAwbLock(VALUE_FALSE);
+
+    // Set default Camera mode
+    set(KEY_QC_CAMERA_MODE, 0);
+
+    // Add support for internal preview restart
+    set(KEY_INTERNAL_PERVIEW_RESTART, VALUE_TRUE);
+    // Set default burst number
+    set(KEY_QC_SNAPSHOT_BURST_NUM, 0);
+    set(KEY_QC_NUM_RETRO_BURST_PER_SHUTTER, 0);
+
+    // Get RAM size and disable memory-intensive features on low-memory devices
+    struct sysinfo info;
+    sysinfo(&info);
+
+    CDBG_HIGH("%s: totalram = %ld, freeram = %ld ", __func__, info.totalram,
+        info.freeram);
+    if (info.totalram > TOTAL_RAM_SIZE_512MB) {
+        set(KEY_QC_LONGSHOT_SUPPORTED, VALUE_TRUE);
+        set(KEY_QC_ZSL_HDR_SUPPORTED, VALUE_TRUE);
+    } else {
+        set(KEY_QC_LONGSHOT_SUPPORTED, VALUE_FALSE);
+        set(KEY_QC_ZSL_HDR_SUPPORTED, VALUE_FALSE);
+    }
+
+    setOfflineRAW();
+    memset(mStreamPpMask, 0, sizeof(uint32_t)*CAM_STREAM_TYPE_MAX);
+
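+    // Commit the accumulated defaults to the backend in a single batch, then
+    // refresh the expected snapshot count.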
+    int32_t rc = commitParameters();
+    if (rc == NO_ERROR) {
+        rc = setNumOfSnapshot();
+    }
+
+    //Set Video Rotation
+    String8 videoRotationValues = createValuesStringFromMap(VIDEO_ROTATION_MODES_MAP,
+            PARAM_MAP_SIZE(VIDEO_ROTATION_MODES_MAP));
+
+    set(KEY_QC_SUPPORTED_VIDEO_ROTATION_VALUES, videoRotationValues.string());
+    set(KEY_QC_VIDEO_ROTATION, VIDEO_ROTATION_0);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : init
+ *
+ * DESCRIPTION: initialize parameter obj
+ *
+ * PARAMETERS :
+ *   @capabilities  : ptr to camera capabilities
+ *   @mmOps         : ptr to memory ops table for mapping/unmapping
+ *   @adjustFPS     : object reference for additional (possibly thermal)
+ *                    framerate adjustment
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::init(cam_capability_t *capabilities,
+        mm_camera_vtbl_t *mmOps, QCameraAdjustFPS *adjustFPS)
+{
+    int32_t rc = NO_ERROR;
+
+    m_pCapability = capabilities;
+    m_pCamOpsTbl = mmOps;
+    m_AdjustFPS = adjustFPS;
+
+    //Allocate Set Param Buffer
+    m_pParamHeap = new QCameraHeapMemory(QCAMERA_ION_USE_CACHE);
+    rc = m_pParamHeap->allocate(1, sizeof(parm_buffer_t), NON_SECURE);
+    if(rc != OK) {
+        rc = NO_MEMORY;
+        ALOGE("Failed to allocate SETPARM Heap memory");
+        goto TRANS_INIT_ERROR1;
+    }
+
+    //Map memory for parameters buffer
+    rc = m_pCamOpsTbl->ops->map_buf(m_pCamOpsTbl->camera_handle,
+                             CAM_MAPPING_BUF_TYPE_PARM_BUF,
+                             m_pParamHeap->getFd(0),
+                             sizeof(parm_buffer_t));
+    if(rc < 0) {
+        ALOGE("%s:failed to map SETPARM buffer",__func__);
+        rc = FAILED_TRANSACTION;
+        goto TRANS_INIT_ERROR2;
+    }
+    m_pParamBuf = (parm_buffer_t*) DATA_PTR(m_pParamHeap,0);
+
+    initDefaultParameters();
+
+    m_bInited = true;
+
+    goto TRANS_INIT_DONE;
+
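+    // Failure paths below fall through in reverse order of setup: a mapping
+    // failure first releases the heap allocation, then the heap object itself
+    // is deleted.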
+TRANS_INIT_ERROR2:
+    m_pParamHeap->deallocate();
+
+TRANS_INIT_ERROR1:
+    delete m_pParamHeap;
+    m_pParamHeap = NULL;
+
+TRANS_INIT_DONE:
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : deinit
+ *
+ * DESCRIPTION: deinitialize
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraParameters::deinit()
+{
+    if (!m_bInited) {
+        return;
+    }
+
+    //clear all entries in the map
+    String8 emptyStr;
+    QCameraParameters::unflatten(emptyStr);
+
+    if (NULL != m_pCamOpsTbl) {
+        m_pCamOpsTbl->ops->unmap_buf(
+                             m_pCamOpsTbl->camera_handle,
+                             CAM_MAPPING_BUF_TYPE_PARM_BUF);
+        m_pCamOpsTbl = NULL;
+    }
+    m_pCapability = NULL;
+    if (NULL != m_pParamHeap) {
+        m_pParamHeap->deallocate();
+        delete m_pParamHeap;
+        m_pParamHeap = NULL;
+        m_pParamBuf = NULL;
+    }
+
+    m_AdjustFPS = NULL;
+
+    m_tempMap.clear();
+
+    m_bInited = false;
+}
+
+/*===========================================================================
+ * FUNCTION   : parse_pair
+ *
+ * DESCRIPTION: helper function to parse string like "640x480" or "10000,20000"
+ *
+ * PARAMETERS :
+ *   @str     : input string to be parsed
+ *   @first   : [output] first value of the pair
+ *   @second  : [output] second value of the pair
+ *   @delim   : [input] delimiter to separate the pair
+ *   @endptr  : [output] ptr to the end of the pair string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::parse_pair(const char *str,
+                                      int *first,
+                                      int *second,
+                                      char delim,
+                                      char **endptr = NULL)
+{
+    // Find the first integer.
+    char *end;
+    int w = (int)strtol(str, &end, 10);
+    // If a delimiter does not immediately follow, give up.
+    if (*end != delim) {
+        ALOGE("Cannot find delimiter (%c) in str=%s", delim, str);
+        return BAD_VALUE;
+    }
+
+    // Find the second integer, immediately after the delimiter.
+    int h = (int)strtol(end+1, &end, 10);
+
+    *first = w;
+    *second = h;
+
+    if (endptr) {
+        *endptr = end;
+    }
+
+    return NO_ERROR;
+}
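+
+// Illustrative usage (hypothetical values): parse_pair("640x480", &w, &h, 'x')
+// yields w = 640 and h = 480. parseSizesList() below relies on the optional
+// endptr argument to walk a comma-separated list such as "640x480,320x240".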
+
+/*===========================================================================
+ * FUNCTION   : parseSizesList
+ *
+ * DESCRIPTION: helper function to parse string containing sizes
+ *
+ * PARAMETERS :
+ *   @sizesStr: [input] input string to be parsed
+ *   @sizes   : [output] reference to store parsed sizes
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraParameters::parseSizesList(const char *sizesStr, Vector<Size> &sizes)
+{
+    if (sizesStr == 0) {
+        return;
+    }
+
+    char *sizeStartPtr = (char *)sizesStr;
+
+    while (true) {
+        int width, height;
+        int success = parse_pair(sizeStartPtr, &width, &height, 'x',
+                                 &sizeStartPtr);
+        if (success == -1 || (*sizeStartPtr != ',' && *sizeStartPtr != '\0')) {
+            ALOGE("Picture sizes string \"%s\" contains invalid character.", sizesStr);
+            return;
+        }
+        sizes.push(Size(width, height));
+
+        if (*sizeStartPtr == '\0') {
+            return;
+        }
+        sizeStartPtr++;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getSupportedHfrSizes
+ *
+ * DESCRIPTION: return supported HFR sizes
+ *
+ * PARAMETERS :
+ *   @sizes  : [output] reference to a vector storing supported HFR sizes
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraParameters::getSupportedHfrSizes(Vector<Size> &sizes)
+{
+    const char *hfrSizesStr = get(KEY_QC_SUPPORTED_HFR_SIZES);
+    parseSizesList(hfrSizesStr, sizes);
+}
+
+/*===========================================================================
+ * FUNCTION   : adjustPreviewFpsRange
+ *
+ * DESCRIPTION: adjust preview FPS range
+ *              according to external events
+ *
+ * PARAMETERS :
+ *   @fpsRange : new FPS range to be applied
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::adjustPreviewFpsRange(cam_fps_range_t *fpsRange)
+{
+    if ( fpsRange == NULL ) {
+        return BAD_VALUE;
+    }
+
+    if ( m_pParamBuf == NULL ) {
+        return NO_INIT;
+    }
+
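+    // Unlike the regular setters, this path builds and commits its own
+    // parameter batch immediately, since it may be invoked outside of a normal
+    // setParameters() update (e.g. on an external/thermal event).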
+    int32_t rc = initBatchUpdate(m_pParamBuf);
+    if ( rc != NO_ERROR ) {
+        ALOGE("%s:Failed to initialize group update table", __func__);
+        return rc;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_FPS_RANGE, *fpsRange)) {
+        ALOGE("%s: Parameters batch failed",__func__);
+        return BAD_VALUE;
+    }
+
+    rc = commitSetBatch();
+    if ( rc != NO_ERROR ) {
+        ALOGE("%s:Failed to commit batch parameters", __func__);
+        return rc;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setPreviewFpsRange
+ *
+ * DESCRIPTION: set preview FPS range
+ *
+ * PARAMETERS :
+ *   @min_fps     : min preview FPS value (fps * 1000)
+ *   @max_fps     : max preview FPS value (fps * 1000)
+ *   @vid_min_fps : min video FPS value (fps * 1000)
+ *   @vid_max_fps : max video FPS value (fps * 1000)
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setPreviewFpsRange(int min_fps,
+        int max_fps, int vid_min_fps,int vid_max_fps)
+{
+    char str[32];
+    char value[PROPERTY_VALUE_MAX];
+    int fixedFpsValue;
+    /* If set, this property forces a fixed preview fps requested by the user */
+    property_get("persist.debug.set.fixedfps", value, "0");
+    fixedFpsValue = atoi(value);
+
+    CDBG("%s: E minFps = %d, maxFps = %d , vid minFps = %d, vid maxFps = %d",
+                __func__, min_fps, max_fps, vid_min_fps, vid_max_fps);
+
+    if(fixedFpsValue != 0) {
+      min_fps = (int)fixedFpsValue*1000;
+      max_fps = (int)fixedFpsValue*1000;
+    }
+    snprintf(str, sizeof(str), "%d,%d", min_fps, max_fps);
+    CDBG_HIGH("%s: Setting preview fps range %s", __func__, str);
+    updateParamEntry(KEY_PREVIEW_FPS_RANGE, str);
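+    // The Android parameter string carries fps scaled by 1000 (e.g. 30000 for
+    // 30 fps); the backend expects floating-point fps, hence the divisions
+    // below.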
+    cam_fps_range_t fps_range;
+    memset(&fps_range, 0x00, sizeof(cam_fps_range_t));
+    fps_range.min_fps = (float)min_fps / 1000.0f;
+    fps_range.max_fps = (float)max_fps / 1000.0f;
+    fps_range.video_min_fps = (float)vid_min_fps / 1000.0f;
+    fps_range.video_max_fps = (float)vid_max_fps / 1000.0f;
+
+    CDBG_HIGH("%s: Updated: minFps = %d, maxFps = %d ,"
+            " vid minFps = %d, vid maxFps = %d",
+            __func__, min_fps, max_fps, vid_min_fps, vid_max_fps);
+
+    if ( NULL != m_AdjustFPS ) {
+        m_AdjustFPS->recalcFPSRange(min_fps, max_fps, fps_range);
+        CDBG_HIGH("%s: Thermal adjusted Preview fps range %3.2f,%3.2f, %3.2f, %3.2f",
+              __func__, fps_range.min_fps, fps_range.max_fps,
+              fps_range.video_min_fps, fps_range.video_max_fps);
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_FPS_RANGE, fps_range)) {
+        return BAD_VALUE;
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setAutoExposure
+ *
+ * DESCRIPTION: set auto exposure
+ *
+ * PARAMETERS :
+ *   @autoExp : auto exposure value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setAutoExposure(const char *autoExp)
+{
+    if (autoExp != NULL) {
+        int32_t value = lookupAttr(AUTO_EXPOSURE_MAP, PARAM_MAP_SIZE(AUTO_EXPOSURE_MAP), autoExp);
+        if (value != NAME_NOT_FOUND) {
+            CDBG_HIGH("%s: Setting auto exposure %s", __func__, autoExp);
+            updateParamEntry(KEY_QC_AUTO_EXPOSURE, autoExp);
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_AEC_ALGO_TYPE, value)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid auto exposure value: %s", (autoExp == NULL) ? "NULL" : autoExp);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setEffect
+ *
+ * DESCRIPTION: set effect
+ *
+ * PARAMETERS :
+ *   @effect  : effect value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setEffect(const char *effect)
+{
+    if (effect != NULL) {
+        int32_t value = lookupAttr(EFFECT_MODES_MAP, PARAM_MAP_SIZE(EFFECT_MODES_MAP), effect);
+        if (value != NAME_NOT_FOUND) {
+            CDBG_HIGH("%s: Setting effect %s", __func__, effect);
+            updateParamEntry(KEY_EFFECT, effect);
+            uint8_t prmEffect = static_cast<uint8_t>(value);
+            mParmEffect = prmEffect;
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_EFFECT, prmEffect)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid effect value: %s", (effect == NULL) ? "NULL" : effect);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setBrightness
+ *
+ * DESCRIPTION: set brightness control value
+ *
+ * PARAMETERS :
+ *   @brightness  : brightness control value
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setBrightness(int brightness)
+{
+    char val[16];
+    snprintf(val, sizeof(val), "%d", brightness);
+    updateParamEntry(KEY_QC_BRIGHTNESS, val);
+
+    CDBG_HIGH("%s: Setting brightness %s", __func__, val);
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_BRIGHTNESS, brightness)) {
+        return BAD_VALUE;
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setFocusMode
+ *
+ * DESCRIPTION: set focus mode
+ *
+ * PARAMETERS :
+ *   @focusMode  : focus mode value string
+ *
+ * RETURN     : int32_t type of status
+ *              non-zero failure code
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setFocusMode(const char *focusMode)
+{
+    if (focusMode != NULL) {
+        int32_t value = lookupAttr(FOCUS_MODES_MAP, PARAM_MAP_SIZE(FOCUS_MODES_MAP), focusMode);
+        if (value != NAME_NOT_FOUND) {
+            int32_t rc = NO_ERROR;
+            CDBG_HIGH("%s: Setting focus mode %s", __func__, focusMode);
+            mFocusMode = (cam_focus_mode_type)value;
+
+            updateParamEntry(KEY_FOCUS_MODE, focusMode);
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf,
+                    CAM_INTF_PARM_FOCUS_MODE, (uint8_t)value)) {
+                rc = BAD_VALUE;
+            }
+            if (strcmp(focusMode,"infinity")==0){
+                set(QCameraParameters::KEY_FOCUS_DISTANCES, "Infinity,Infinity,Infinity");
+            }
+            return rc;
+        }
+    }
+    ALOGE("Invalid focus mode value: %s", (focusMode == NULL) ? "NULL" : focusMode);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setFocusPosition
+ *
+ * DESCRIPTION: set focus position
+ *
+ * PARAMETERS :
+ *   @typeStr : focus position type (index, dac_code, ratio, or diopter)
+ *   @posStr  : focus position
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t  QCameraParameters::setFocusPosition(const char *typeStr, const char *posStr)
+{
+    CDBG_HIGH("%s, type:%s, pos: %s", __func__, typeStr, posStr);
+    int32_t type = atoi(typeStr);
+    float pos = (float) atof(posStr);
+
+    if ((type >= CAM_MANUAL_FOCUS_MODE_INDEX) &&
+            (type < CAM_MANUAL_FOCUS_MODE_MAX)) {
+        // get max and min focus position from m_pCapability
+        int32_t minFocusPos = (int32_t) m_pCapability->min_focus_pos[type];
+        int32_t maxFocusPos = (int32_t) m_pCapability->max_focus_pos[type];
+        CDBG_HIGH("%s, focusPos min: %d, max: %d", __func__, minFocusPos, maxFocusPos);
+
+        if (pos >= minFocusPos && pos <= maxFocusPos) {
+            updateParamEntry(KEY_QC_MANUAL_FOCUS_POS_TYPE, typeStr);
+            updateParamEntry(KEY_QC_MANUAL_FOCUS_POSITION, posStr);
+
+            cam_manual_focus_parm_t manual_focus;
+            manual_focus.flag = (cam_manual_focus_mode_type)type;
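+            // Populate the field that corresponds to the requested manual
+            // focus mode (diopter, ratio, index, or DAC code).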
+            if (manual_focus.flag == CAM_MANUAL_FOCUS_MODE_DIOPTER) {
+                manual_focus.af_manual_diopter = pos;
+            } else if (manual_focus.flag == CAM_MANUAL_FOCUS_MODE_RATIO) {
+                manual_focus.af_manual_lens_position_ratio = (int32_t) pos;
+            } else if (manual_focus.flag == CAM_MANUAL_FOCUS_MODE_INDEX) {
+                manual_focus.af_manual_lens_position_index = (int32_t) pos;
+            } else {
+                manual_focus.af_manual_lens_position_dac = (int32_t) pos;
+            }
+
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_MANUAL_FOCUS_POS,
+                    manual_focus)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+
+    ALOGE("%s, invalid params, type: %d, pos: %f", __func__, type, pos);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : updateAEInfo
+ *
+ * DESCRIPTION: update exposure information from metadata callback
+ *
+ * PARAMETERS :
+ *   @ae_params : auto exposure params
+ *
+ * RETURN     : void
+ *==========================================================================*/
+void  QCameraParameters::updateAEInfo(cam_3a_params_t &ae_params)
+{
+    const char *prevExpTime = get(KEY_QC_CURRENT_EXPOSURE_TIME);
+    char newExpTime[15];
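+    // exp_time is reported by the backend in seconds (presumably), so it is
+    // published to the upper layer in milliseconds.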
+    snprintf(newExpTime, sizeof(newExpTime), "%f", ae_params.exp_time*1000.0);
+
+    if (prevExpTime == NULL || strcmp(prevExpTime, newExpTime)) {
+        CDBG("update exposure time: old: %s, new: %s", prevExpTime, newExpTime);
+        set(KEY_QC_CURRENT_EXPOSURE_TIME, newExpTime);
+    }
+
+    int32_t prevISO = getInt(KEY_QC_CURRENT_ISO);
+    int32_t newISO = ae_params.iso_value;
+    if (prevISO != newISO) {
+        CDBG("update iso: old:%d, new:%d", prevISO, newISO);
+        set(KEY_QC_CURRENT_ISO, newISO);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : updateCurrentFocusPosition
+ *
+ * DESCRIPTION: update current focus position from metadata callback
+ *
+ * PARAMETERS :
+ *   @cur_pos_info : current focus position info
+ *
+ * RETURN     : void
+ *==========================================================================*/
+void  QCameraParameters::updateCurrentFocusPosition(cam_focus_pos_info_t &cur_pos_info)
+{
+    int prevScalePos = getInt(KEY_QC_FOCUS_POSITION_SCALE);
+    int newScalePos = (int) cur_pos_info.scale;
+    if (prevScalePos != newScalePos) {
+        CDBG("update focus scale: old:%d, new:%d", prevScalePos, newScalePos);
+        set(KEY_QC_FOCUS_POSITION_SCALE, newScalePos);
+    }
+
+    float prevDiopterPos = getFloat(KEY_QC_FOCUS_POSITION_DIOPTER);
+    float newDiopterPos = cur_pos_info.diopter;
+    if (prevDiopterPos != newDiopterPos) {
+        CDBG("update focus diopter: old:%f, new:%f", prevDiopterPos, newDiopterPos);
+        setFloat(KEY_QC_FOCUS_POSITION_DIOPTER, newDiopterPos);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : setSharpness
+ *
+ * DESCRIPTION: set sharpness control value
+ *
+ * PARAMETERS :
+ *   @sharpness  : sharpness control value
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setSharpness(int sharpness)
+{
+    char val[16];
+    snprintf(val, sizeof(val), "%d", sharpness);
+    updateParamEntry(KEY_QC_SHARPNESS, val);
+    CDBG_HIGH("%s: Setting sharpness %s", __func__, val);
+    m_nSharpness = sharpness;
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_SHARPNESS, m_nSharpness)) {
+        return BAD_VALUE;
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setSkinToneEnhancement
+ *
+ * DESCRIPTION: set skin tone enhancement value
+ *
+ * PARAMETERS :
+ *   @sceFactor   : skin tone enhancement factor value
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setSkinToneEnhancement(int sceFactor)
+{
+    char val[16];
+    snprintf(val, sizeof(val), "%d", sceFactor);
+    updateParamEntry(KEY_QC_SCE_FACTOR, val);
+    CDBG_HIGH("%s: Setting skintone enhancement %s", __func__, val);
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_SCE_FACTOR, sceFactor)) {
+        return BAD_VALUE;
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setSaturation
+ *
+ * DESCRIPTION: set saturation control value
+ *
+ * PARAMETERS :
+ *   @saturation : saturation control value
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setSaturation(int saturation)
+{
+    char val[16];
+    snprintf(val, sizeof(val), "%d", saturation);
+    updateParamEntry(KEY_QC_SATURATION, val);
+    CDBG_HIGH("%s: Setting saturation %s", __func__, val);
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_SATURATION, saturation)) {
+        return BAD_VALUE;
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setContrast
+ *
+ * DESCRIPTION: set contrast control value
+ *
+ * PARAMETERS :
+ *   @contrast : contrast control value
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setContrast(int contrast)
+{
+    char val[16];
+    snprintf(val, sizeof(val), "%d", contrast);
+    updateParamEntry(KEY_QC_CONTRAST, val);
+    CDBG_HIGH("%s: Setting contrast %s", __func__, val);
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_CONTRAST, contrast)) {
+        return BAD_VALUE;
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setSceneDetect
+ *
+ * DESCRIPTION: set scene detect value
+ *
+ * PARAMETERS :
+ *   @sceneDetect  : scene detect value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setSceneDetect(const char *sceneDetect)
+{
+    if (sceneDetect != NULL) {
+        int32_t value = lookupAttr(ON_OFF_MODES_MAP, PARAM_MAP_SIZE(ON_OFF_MODES_MAP),
+                sceneDetect);
+        if (value != NAME_NOT_FOUND) {
+            CDBG_HIGH("%s: Setting Scene Detect %s", __func__, sceneDetect);
+            updateParamEntry(KEY_QC_SCENE_DETECT, sceneDetect);
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_ASD_ENABLE, value)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid Scene Detect value: %s",
+          (sceneDetect == NULL) ? "NULL" : sceneDetect);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setSensorSnapshotHDR
+ *
+ * DESCRIPTION: set snapshot HDR value
+ *
+ * PARAMETERS :
+ *   @snapshotHDR  : snapshot HDR value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setSensorSnapshotHDR(const char *snapshotHDR)
+{
+    if (snapshotHDR != NULL) {
+        int32_t value = lookupAttr(ON_OFF_MODES_MAP, PARAM_MAP_SIZE(ON_OFF_MODES_MAP),
+                snapshotHDR);
+        if (value != NAME_NOT_FOUND) {
+            CDBG_HIGH("%s: Setting Sensor Snapshot HDR %s", __func__, snapshotHDR);
+            updateParamEntry(KEY_QC_SENSOR_HDR, snapshotHDR);
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_SENSOR_HDR, value)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid Snapshot HDR value: %s",
+          (snapshotHDR == NULL) ? "NULL" : snapshotHDR);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setVideoHDR
+ *
+ * DESCRIPTION: set video HDR value
+ *
+ * PARAMETERS :
+ *   @videoHDR  : video HDR value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setVideoHDR(const char *videoHDR)
+{
+    if (videoHDR != NULL) {
+        int32_t value = lookupAttr(ON_OFF_MODES_MAP, PARAM_MAP_SIZE(ON_OFF_MODES_MAP), videoHDR);
+        if (value != NAME_NOT_FOUND) {
+            CDBG_HIGH("%s: Setting Video HDR %s", __func__, videoHDR);
+            updateParamEntry(KEY_QC_VIDEO_HDR, videoHDR);
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_VIDEO_HDR, value)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid Video HDR value: %s",
+          (videoHDR == NULL) ? "NULL" : videoHDR);
+    return BAD_VALUE;
+}
+
+
+
+/*===========================================================================
+ * FUNCTION   : setVtEnable
+ *
+ * DESCRIPTION: set VT enable value
+ *
+ * PARAMETERS :
+ *   @vtEnable  : VT enable value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setVtEnable(const char *vtEnable)
+{
+    if (vtEnable != NULL) {
+        int32_t value = lookupAttr(ENABLE_DISABLE_MODES_MAP,
+                PARAM_MAP_SIZE(ENABLE_DISABLE_MODES_MAP), vtEnable);
+        if (value != NAME_NOT_FOUND) {
+            CDBG_HIGH("%s: Setting Vt Enable %s", __func__, vtEnable);
+            m_bAVTimerEnabled = true;
+            updateParamEntry(KEY_QC_VT_ENABLE, vtEnable);
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_VT, value)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid Vt Enable value: %s",
+          (vtEnable == NULL) ? "NULL" : vtEnable);
+    m_bAVTimerEnabled = false;
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setFaceRecognition
+ *
+ * DESCRIPTION: set face recognition value
+ *
+ * PARAMETERS :
+ *   @faceRecog  : face recognition value string
+ *   @maxFaces   : number of max faces to be detected/recognized
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setFaceRecognition(const char *faceRecog,
+        uint32_t maxFaces)
+{
+    if (faceRecog != NULL) {
+        int32_t value = lookupAttr(ON_OFF_MODES_MAP, PARAM_MAP_SIZE(ON_OFF_MODES_MAP), faceRecog);
+        if (value != NAME_NOT_FOUND) {
+            CDBG_HIGH("%s: Setting face recognition %s", __func__, faceRecog);
+            updateParamEntry(KEY_QC_FACE_RECOGNITION, faceRecog);
+
+            uint32_t faceProcMask = m_nFaceProcMask;
+            if (value > 0) {
+                faceProcMask |= CAM_FACE_PROCESS_MASK_RECOGNITION;
+            } else {
+                faceProcMask &= (uint32_t)(~CAM_FACE_PROCESS_MASK_RECOGNITION);
+            }
+
+            if(m_nFaceProcMask == faceProcMask) {
+                CDBG_HIGH("%s: face process mask not changed, no ops here", __func__);
+                return NO_ERROR;
+            }
+            m_nFaceProcMask = faceProcMask;
+            CDBG_HIGH("%s: FaceProcMask -> %d", __func__, m_nFaceProcMask);
+
+            // set parm for face process
+            cam_fd_set_parm_t fd_set_parm;
+            memset(&fd_set_parm, 0, sizeof(cam_fd_set_parm_t));
+            fd_set_parm.fd_mode = m_nFaceProcMask;
+            fd_set_parm.num_fd = maxFaces;
+
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_FD, fd_set_parm)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid face recognition value: %s", (faceRecog == NULL) ? "NULL" : faceRecog);
+    return BAD_VALUE;
+}
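
The toggle above only flips one bit in the face-process mask that is shared with face detection. Below is a minimal standalone sketch of that bit manipulation; the two mask constants are placeholders standing in for the CAM_FACE_PROCESS_MASK_* values from the camera interface headers, not the real definitions.

    #include <cstdint>
    #include <cstdio>

    // Placeholder bits; the real values come from CAM_FACE_PROCESS_MASK_DETECTION
    // and CAM_FACE_PROCESS_MASK_RECOGNITION in the camera interface headers.
    static const uint32_t MASK_DETECTION   = 1U << 0;
    static const uint32_t MASK_RECOGNITION = 1U << 1;

    int main() {
        uint32_t faceProcMask = MASK_DETECTION;             // detection already on

        faceProcMask |= MASK_RECOGNITION;                   // face recognition turned on
        printf("with recognition:    0x%x\n", faceProcMask);    // 0x3

        faceProcMask &= ~MASK_RECOGNITION;                  // turned off again
        printf("without recognition: 0x%x\n", faceProcMask);    // 0x1
        return 0;
    }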
+
+/*===========================================================================
+ * FUNCTION   : setZoom
+ *
+ * DESCRIPTION: set zoom level
+ *
+ * PARAMETERS :
+ *   @zoom_level : zoom level
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setZoom(int zoom_level)
+{
+    char val[16];
+    snprintf(val, sizeof(val), "%d", zoom_level);
+    updateParamEntry(KEY_ZOOM, val);
+    CDBG_HIGH("%s: zoom level: %d", __func__, zoom_level);
+    mZoomLevel = zoom_level;
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_ZOOM, zoom_level)) {
+        return BAD_VALUE;
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setISOValue
+ *
+ * DESCRIPTION: set ISO value
+ *
+ * PARAMETERS :
+ *   @isoValue : ISO value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t  QCameraParameters::setISOValue(const char *isoValue)
+{
+    if (isoValue != NULL) {
+        if (!strcmp(isoValue, ISO_MANUAL)) {
+            CDBG("%s, iso manual mode - use continuous iso", __func__);
+            updateParamEntry(KEY_QC_ISO_MODE, isoValue);
+            return NO_ERROR;
+        }
+        int32_t value = lookupAttr(ISO_MODES_MAP, PARAM_MAP_SIZE(ISO_MODES_MAP), isoValue);
+        if (value != NAME_NOT_FOUND) {
+            CDBG_HIGH("%s: Setting ISO value %s", __func__, isoValue);
+            updateParamEntry(KEY_QC_ISO_MODE, isoValue);
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_ISO, value)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid ISO value: %s",
+          (isoValue == NULL) ? "NULL" : isoValue);
+    return BAD_VALUE;
+}
+
+
+/*===========================================================================
+ * FUNCTION   : setContinuousISO
+ *
+ * DESCRIPTION: set continuous ISO value
+ *
+ * PARAMETERS :
+ *   @params : ISO value parameter
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t  QCameraParameters::setContinuousISO(const QCameraParameters& params)
+{
+    const char *iso = params.get(KEY_QC_ISO_MODE);
+    CDBG("%s, current iso mode: %s", __func__, iso);
+
+    if (iso != NULL) {
+        if (strcmp(iso, ISO_MANUAL)) {
+            CDBG("%s, don't set ISO to back-end.", __func__);
+            return NO_ERROR;
+        }
+    }
+
+    const char *str = params.get(KEY_QC_CONTINUOUS_ISO);
+    const char *prev_str = get(KEY_QC_CONTINUOUS_ISO);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setContinuousISO(str);
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setExposureTime
+ *
+ * DESCRIPTION: set exposure time
+ *
+ * PARAMETERS :
+ *   @expTimeStr : string of exposure time in ms
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t  QCameraParameters::setExposureTime(const char *expTimeStr)
+{
+    if (expTimeStr != NULL) {
+        double expTimeMs = atof(expTimeStr);
+        // input is in milliseconds; convert to nanoseconds for the backend
+        int64_t expTimeNs = ((int64_t)expTimeMs)*1000000L;
+
+        // expTime == 0 means not to use manual exposure time.
+        if ((0 <= expTimeNs) &&
+                ((expTimeNs == 0) ||
+                ((expTimeNs >= m_pCapability->exposure_time_range[0]) &&
+                (expTimeNs <= m_pCapability->exposure_time_range[1])))) {
+            CDBG_HIGH("%s, exposure time: %f ms", __func__, expTimeMs);
+            updateParamEntry(KEY_QC_EXPOSURE_TIME, expTimeStr);
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_EXPOSURE_TIME,
+                    (uint64_t)expTimeNs)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+
+    ALOGE("Invalid exposure time, value: %s",
+          (expTimeStr == NULL) ? "NULL" : expTimeStr);
+    return BAD_VALUE;
+}
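
setExposureTime() takes a millisecond string, converts it to nanoseconds for the backend, and treats 0 as "do not use manual exposure". A small standalone sketch of that conversion and range check; the [200000, 200000000] ns bounds used here are hypothetical, since the real limits come from m_pCapability->exposure_time_range.

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // Mirrors the ms -> ns conversion and validation above (illustrative only).
    static bool acceptExposureTime(const char *expTimeStr,
            int64_t minNs, int64_t maxNs, int64_t &outNs) {
        double expTimeMs = atof(expTimeStr);                    // input is in ms
        int64_t expTimeNs = ((int64_t)expTimeMs) * 1000000L;    // ms -> ns
        // 0 means "do not use manual exposure time"
        if (expTimeNs == 0 || (expTimeNs >= minNs && expTimeNs <= maxNs)) {
            outNs = expTimeNs;
            return true;
        }
        return false;
    }

    int main() {
        int64_t ns = 0;
        bool ok = acceptExposureTime("30", 200000LL, 200000000LL, ns);
        printf("30 ms -> %s (%lld ns)\n", ok ? "accepted" : "rejected", (long long)ns);
        return 0;
    }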
+
+/*===========================================================================
+ * FUNCTION   : setLongshotEnable
+ *
+ * DESCRIPTION: set a flag indicating longshot mode
+ *
+ * PARAMETERS :
+ *   @enable  : true - Longshot enabled
+ *              false - Longshot disabled
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setLongshotEnable(bool enable)
+{
+    int32_t rc = NO_ERROR;
+    int8_t value = enable ? 1 : 0;
+
+    if(initBatchUpdate(m_pParamBuf) < 0 ) {
+        ALOGE("%s:Failed to initialize group update table", __func__);
+        return BAD_TYPE;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_LONGSHOT_ENABLE, value)) {
+        ALOGE("%s:Failed to update table", __func__);
+        return BAD_VALUE;
+    }
+
+    rc = commitSetBatch();
+    if (rc != NO_ERROR) {
+        ALOGE("%s:Failed to commit parameter changes", __func__);
+        return rc;
+    }
+
+    return rc;
+}
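
setLongshotEnable() is the first of many setters below that follow the same three-step batch pattern: initBatchUpdate() to start a fresh parameter batch, ADD_SET_PARAM_ENTRY_TO_BATCH() to queue entries, and commitSetBatch() to push them to the daemon. The sketch below only illustrates that flow; the three helpers are stand-in stubs, not the HAL implementations.

    #include <cstdio>

    // Stand-in stubs for the HAL's batch helpers; the real ones live in
    // QCameraParameters / mm-camera-interface and talk to the camera daemon.
    static int initBatchUpdate()              { printf("init parameter batch\n");   return 0; }
    static int addEntryToBatch(int id, int v) { printf("queue param %d = %d\n", id, v); return 0; }
    static int commitSetBatch()               { printf("commit batch to backend\n"); return 0; }

    int main() {
        if (initBatchUpdate() < 0) return -1;                     // 1. start a fresh batch
        if (addEntryToBatch(42 /* hypothetical id */, 1) != 0)    // 2. queue entries
            return -1;
        return commitSetBatch();                                  // 3. push everything at once
    }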
+
+/*===========================================================================
+ * FUNCTION   : setFlash
+ *
+ * DESCRIPTION: set flash mode
+ *
+ * PARAMETERS :
+ *   @flashStr : LED flash mode value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setFlash(const char *flashStr)
+{
+    if (flashStr != NULL) {
+        int32_t value = lookupAttr(FLASH_MODES_MAP, PARAM_MAP_SIZE(FLASH_MODES_MAP), flashStr);
+        if (value != NAME_NOT_FOUND) {
+            CDBG_HIGH("%s: Setting Flash value %s", __func__, flashStr);
+            updateParamEntry(KEY_FLASH_MODE, flashStr);
+            mFlashValue = value;
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid flash value: %s", (flashStr == NULL) ? "NULL" : flashStr);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : updateFlashMode
+ *
+ * DESCRIPTION: update flash mode
+ *
+ * PARAMETERS :
+ *   @flashStr : LED flash mode value
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::updateFlashMode(cam_flash_mode_t flash_mode)
+{
+    int32_t rc = NO_ERROR;
+    if (flash_mode >= CAM_FLASH_MODE_MAX) {
+        CDBG_HIGH("%s: Error!! Invalid flash mode (%d)", __func__, flash_mode);
+        return BAD_VALUE;
+    }
+    CDBG_HIGH("%s: Setting Flash mode from EZTune %d", __func__, flash_mode);
+
+    const char *flash_mode_str = lookupNameByValue(FLASH_MODES_MAP,
+            PARAM_MAP_SIZE(FLASH_MODES_MAP), flash_mode);
+    if(initBatchUpdate(m_pParamBuf) < 0 ) {
+        ALOGE("%s:Failed to initialize group update table", __func__);
+        return BAD_TYPE;
+    }
+    rc = setFlash(flash_mode_str);
+    if (rc != NO_ERROR) {
+        ALOGE("%s:Failed to update Flash mode", __func__);
+        return rc;
+    }
+
+    CDBG_HIGH("%s: Setting Flash mode %d", __func__, mFlashValue);
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_LED_MODE, mFlashValue)) {
+        ALOGE("%s:Failed to set led mode", __func__);
+        return BAD_VALUE;
+    }
+
+    rc = commitSetBatch();
+    if (rc != NO_ERROR) {
+        ALOGE("%s:Failed to commit parameters", __func__);
+        return rc;
+    }
+
+    return NO_ERROR;
+}
+
+
+/*===========================================================================
+ * FUNCTION   : configureFlash
+ *
+ * DESCRIPTION: configure Flash Bracketing.
+ *
+ * PARAMETERS :
+ *    @frame_config : output configuration structure to fill in.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::configureFlash(cam_capture_frame_config_t &frame_config)
+{
+    CDBG_HIGH("%s: E",__func__);
+    int32_t rc = NO_ERROR;
+    uint32_t i = 0;
+
+    if (isChromaFlashEnabled()) {
+
+        rc = setToneMapMode(false, false);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: Failed to configure tone map", __func__);
+            return rc;
+        }
+
+        rc = setCDSMode(CAM_CDS_MODE_OFF, false);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: Failed to configure CDS mode", __func__);
+            return rc;
+        }
+
+        CDBG_HIGH("%s : Enable Chroma Flash capture", __func__);
+        cam_flash_mode_t flash_mode = CAM_FLASH_MODE_OFF;
+        frame_config.num_batch =
+                m_pCapability->chroma_flash_settings_need.burst_count;
+        if (frame_config.num_batch > CAM_MAX_FLASH_BRACKETING) {
+            frame_config.num_batch = CAM_MAX_FLASH_BRACKETING;
+        }
+        for (i = 0; i < frame_config.num_batch; i++) {
+            flash_mode = (m_pCapability->chroma_flash_settings_need.flash_bracketing[i]) ?
+                    CAM_FLASH_MODE_ON:CAM_FLASH_MODE_OFF;
+            frame_config.configs[i].num_frames = 1;
+            frame_config.configs[i].type = CAM_CAPTURE_FLASH;
+            frame_config.configs[i].flash_mode = flash_mode;
+        }
+    } else if (mFlashValue != CAM_FLASH_MODE_OFF) {
+        frame_config.num_batch = 1;
+        for (i = 0; i < frame_config.num_batch; i++) {
+            frame_config.configs[i].num_frames = 1;
+            frame_config.configs[i].type = CAM_CAPTURE_FLASH;
+            frame_config.configs[i].flash_mode =(cam_flash_mode_t)mFlashValue;
+        }
+    }
+
+    CDBG("%s: Flash frame config batch cnt = %d", __func__, frame_config.num_batch);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : configureHDRBracketing
+ *
+ * DESCRIPTION: configure HDR Bracketing.
+ *
+ * PARAMETERS :
+ *    @frame_config : output configuration structure to fill in.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::configureHDRBracketing(cam_capture_frame_config_t &frame_config)
+{
+    CDBG_HIGH("%s: E",__func__);
+    int32_t rc = NO_ERROR;
+    uint32_t i = 0;
+
+    uint32_t hdrFrameCount = m_pCapability->hdr_bracketing_setting.num_frames;
+    CDBG_HIGH("%s : HDR values %d, %d frame count: %u",
+          __func__,
+          (int8_t) m_pCapability->hdr_bracketing_setting.exp_val.values[0],
+          (int8_t) m_pCapability->hdr_bracketing_setting.exp_val.values[1],
+          hdrFrameCount);
+
+    frame_config.num_batch = hdrFrameCount;
+
+    cam_bracket_mode mode =
+            m_pCapability->hdr_bracketing_setting.exp_val.mode;
+    if (mode == CAM_EXP_BRACKETING_ON) {
+        rc = setToneMapMode(false, true);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: Failed to disable tone map during HDR", __func__);
+        }
+    }
+    for (i = 0; i < frame_config.num_batch; i++) {
+        frame_config.configs[i].num_frames = 1;
+        frame_config.configs[i].type = CAM_CAPTURE_BRACKETING;
+        frame_config.configs[i].hdr_mode.mode = mode;
+        frame_config.configs[i].hdr_mode.values =
+                m_pCapability->hdr_bracketing_setting.exp_val.values[i];
+        CDBG("%s: exp values %d", __func__,
+                (int)frame_config.configs[i].hdr_mode.values);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : configureAEBracketing
+ *
+ * DESCRIPTION: configure AE Bracketing.
+ *
+ * PARAMETERS :
+ *    @frame_config : output configuration structure to fill in.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::configureAEBracketing(cam_capture_frame_config_t &frame_config)
+{
+    CDBG_HIGH("%s: E",__func__);
+    int32_t rc = NO_ERROR;
+    uint32_t i = 0;
+    char exp_value[MAX_EXP_BRACKETING_LENGTH];
+
+    rc = setToneMapMode(false, true);
+    if (rc != NO_ERROR) {
+        CDBG_HIGH("%s: Failed to disable tone map during AEBracketing", __func__);
+    }
+
+    uint32_t burstCount = 0;
+    const char *str_val = m_AEBracketingClient.values;
+    if ((str_val != NULL) && (strlen(str_val) > 0)) {
+        char prop[PROPERTY_VALUE_MAX];
+        memset(prop, 0, sizeof(prop));
+        strlcpy(prop, str_val, PROPERTY_VALUE_MAX);
+        char *saveptr = NULL;
+        char *token = strtok_r(prop, ",", &saveptr);
+        if (token != NULL) {
+            exp_value[burstCount++] = (char)atoi(token);
+            while (token != NULL) {
+                token = strtok_r(NULL, ",", &saveptr);
+                if (token != NULL) {
+                    exp_value[burstCount++] = (char)atoi(token);
+                }
+            }
+        }
+    }
+
+    frame_config.num_batch = burstCount;
+    cam_bracket_mode mode = m_AEBracketingClient.mode;
+
+    for (i = 0; i < frame_config.num_batch; i++) {
+        frame_config.configs[i].num_frames = 1;
+        frame_config.configs[i].type = CAM_CAPTURE_BRACKETING;
+        frame_config.configs[i].hdr_mode.mode = mode;
+        frame_config.configs[i].hdr_mode.values =
+                m_AEBracketingClient.values[i];
+        CDBG("%s: exp values %d", __func__, (int)m_AEBracketingClient.values[i]);
+    }
+
+    CDBG_HIGH("%s: num_frame = %d X",__func__, burstCount);
+    return rc;
+}
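
The AE-bracketing exposure steps arrive as a comma-separated string and are split with strtok_r(), one capture batch per token. A simplified standalone sketch of that tokenizing, using a hypothetical "-6,0,6" value string in place of m_AEBracketingClient.values:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    int main() {
        // Hypothetical bracketing string; the real one comes from
        // m_AEBracketingClient.values.
        char values[] = "-6,0,6";
        signed char exp_value[8];
        unsigned int burstCount = 0;

        char *saveptr = NULL;
        for (char *token = strtok_r(values, ",", &saveptr);
                token != NULL && burstCount < sizeof(exp_value);
                token = strtok_r(NULL, ",", &saveptr)) {
            exp_value[burstCount++] = (signed char)atoi(token);   // one frame per step
        }

        printf("num_batch = %u, steps = %d, %d, %d\n", burstCount,
                exp_value[0], exp_value[1], exp_value[2]);         // 3, -6, 0, 6
        return 0;
    }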
+
+/*===========================================================================
+ * FUNCTION   : configFrameCapture
+ *
+ * DESCRIPTION: configuration for ZSL special captures (FLASH/HDR etc)
+ *
+ * PARAMETERS :
+ *   @commitSettings : flag to commit these settings to the backend immediately
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::configFrameCapture(bool commitSettings)
+{
+    int32_t rc = NO_ERROR;
+    memset(&m_captureFrameConfig, 0, sizeof(cam_capture_frame_config_t));
+
+    if (commitSettings) {
+        if(initBatchUpdate(m_pParamBuf) < 0 ) {
+            ALOGE("%s:Failed to initialize group update table", __func__);
+            return BAD_TYPE;
+        }
+    }
+
+    if (isChromaFlashEnabled() || mFlashValue != CAM_FLASH_MODE_OFF) {
+        configureFlash(m_captureFrameConfig);
+    } else if(isHDREnabled()) {
+        configureHDRBracketing (m_captureFrameConfig);
+    } else if(isAEBracketEnabled()) {
+        configureAEBracketing (m_captureFrameConfig);
+    }
+
+    rc = ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_CAPTURE_FRAME_CONFIG,
+            (cam_capture_frame_config_t)m_captureFrameConfig);
+    if (rc != NO_ERROR) {
+        rc = BAD_VALUE;
+        ALOGE("%s:Failed to set capture settings", __func__);
+        return rc;
+    }
+
+    if (commitSettings) {
+        rc = commitSetBatch();
+        if (rc != NO_ERROR) {
+            ALOGE("%s:Failed to commit parameters", __func__);
+            return rc;
+        }
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : resetFrameCapture
+ *
+ * DESCRIPTION: reset special capture settings (FLASH/HDR etc.)
+ *
+ * PARAMETERS :
+ *   @commitSettings : flag to commit these settings to the backend immediately
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::resetFrameCapture(bool commitSettings)
+{
+    int32_t rc = NO_ERROR, i = 0;
+    memset(&m_captureFrameConfig, 0, sizeof(cam_capture_frame_config_t));
+
+    if (commitSettings) {
+        if(initBatchUpdate(m_pParamBuf) < 0 ) {
+            ALOGE("%s:Failed to initialize group update table", __func__);
+            return BAD_TYPE;
+        }
+    }
+
+    if (isHDREnabled() || isAEBracketEnabled()) {
+        rc = setToneMapMode(true, true);
+        if (rc != NO_ERROR) {
+            CDBG_HIGH("%s: Failed to enable tone map during HDR/AEBracketing", __func__);
+        }
+        rc = stopAEBracket();
+    } else if (isChromaFlashEnabled()) {
+        rc = setToneMapMode(true, false);
+        if (rc != NO_ERROR) {
+            CDBG_HIGH("%s: Failed to enable tone map during chroma flash", __func__);
+        }
+
+        rc = setCDSMode(mCds_mode, false);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: Failed to configure CDS mode", __func__);
+            return rc;
+        }
+    }
+
+    rc = ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_CAPTURE_FRAME_CONFIG,
+            (cam_capture_frame_config_t)m_captureFrameConfig);
+    if (rc != NO_ERROR) {
+        rc = BAD_VALUE;
+        ALOGE("%s:Failed to set capture settings", __func__);
+        return rc;
+    }
+
+    if (commitSettings) {
+        rc = commitSetBatch();
+        if (rc != NO_ERROR) {
+            ALOGE("%s:Failed to commit parameters", __func__);
+            return rc;
+        }
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setAecLock
+ *
+ * DESCRIPTION: set AEC lock value
+ *
+ * PARAMETERS :
+ *   @aecLockStr : AEC lock value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setAecLock(const char *aecLockStr)
+{
+    if (aecLockStr != NULL) {
+        int32_t value = lookupAttr(TRUE_FALSE_MODES_MAP, PARAM_MAP_SIZE(TRUE_FALSE_MODES_MAP),
+                aecLockStr);
+        if (value != NAME_NOT_FOUND) {
+            CDBG_HIGH("%s: Setting AECLock value %s", __func__, aecLockStr);
+            updateParamEntry(KEY_AUTO_EXPOSURE_LOCK, aecLockStr);
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf,
+                    CAM_INTF_PARM_AEC_LOCK, (uint32_t)value)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid AECLock value: %s",
+        (aecLockStr == NULL) ? "NULL" : aecLockStr);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setAwbLock
+ *
+ * DESCRIPTION: set AWB lock value
+ *
+ * PARAMETERS :
+ *   @awbLockStr : AWB lock value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setAwbLock(const char *awbLockStr)
+{
+    if (awbLockStr != NULL) {
+        int32_t value = lookupAttr(TRUE_FALSE_MODES_MAP, PARAM_MAP_SIZE(TRUE_FALSE_MODES_MAP),
+                awbLockStr);
+        if (value != NAME_NOT_FOUND) {
+            CDBG_HIGH("%s: Setting AWBLock value %s", __func__, awbLockStr);
+            updateParamEntry(KEY_AUTO_WHITEBALANCE_LOCK, awbLockStr);
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf,
+                    CAM_INTF_PARM_AWB_LOCK, (uint32_t)value)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid AWBLock value: %s", (awbLockStr == NULL) ? "NULL" : awbLockStr);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setMCEValue
+ *
+ * DESCRIPTION: set memory color enhancement value
+ *
+ * PARAMETERS :
+ *   @mceStr : MCE value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setMCEValue(const char *mceStr)
+{
+    if (mceStr != NULL) {
+        int32_t value = lookupAttr(ENABLE_DISABLE_MODES_MAP,
+                PARAM_MAP_SIZE(ENABLE_DISABLE_MODES_MAP), mceStr);
+        if (value != NAME_NOT_FOUND) {
+            CDBG_HIGH("%s: Setting MCE value %s", __func__, mceStr);
+            updateParamEntry(KEY_QC_MEMORY_COLOR_ENHANCEMENT, mceStr);
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_MCE, value)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid MCE value: %s", (mceStr == NULL) ? "NULL" : mceStr);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setTintlessValue
+ *
+ * DESCRIPTION: enable/disable tintless from user setting
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setTintlessValue(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_TINTLESS_ENABLE);
+    const char *prev_str = get(KEY_QC_TINTLESS_ENABLE);
+    char prop[PROPERTY_VALUE_MAX];
+
+    memset(prop, 0, sizeof(prop));
+    property_get("persist.camera.tintless", prop, VALUE_ENABLE);
+    if (str != NULL) {
+        if (prev_str == NULL ||
+            strcmp(str, prev_str) != 0) {
+            return setTintlessValue(str);
+        }
+    } else {
+        if (prev_str == NULL ||
+            strcmp(prev_str, prop) != 0 ) {
+            setTintlessValue(prop);
+        }
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setTintless
+ *
+ * DESCRIPTION: set tintless mode
+ *
+ * PARAMETERS :
+ *   @enable : true to enable tintless, false to disable
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraParameters::setTintless(bool enable)
+{
+    if (enable) {
+        setTintlessValue(VALUE_ENABLE);
+    } else {
+        setTintlessValue(VALUE_DISABLE);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : setTintlessValue
+ *
+ * DESCRIPTION: set tintless value
+ *
+ * PARAMETERS :
+ *   @tintStr : Tintless value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setTintlessValue(const char *tintStr)
+{
+    if (tintStr != NULL) {
+        int32_t value = lookupAttr(ENABLE_DISABLE_MODES_MAP,
+                PARAM_MAP_SIZE(ENABLE_DISABLE_MODES_MAP), tintStr);
+        if (value != NAME_NOT_FOUND) {
+            CDBG_HIGH("%s: Setting Tintless value %s", __func__, tintStr);
+            updateParamEntry(KEY_QC_TINTLESS_ENABLE, tintStr);
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_TINTLESS, value)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid Tintless value: %s", (tintStr == NULL) ? "NULL" : tintStr);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setCDSMode
+ *
+ * DESCRIPTION: Set CDS mode
+ *
+ * PARAMETERS :
+ *   @params  : user setting parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setCDSMode(const QCameraParameters& params)
+{
+    const char *str = params.get(KEY_QC_CDS_MODE);
+    const char *prev_str = get(KEY_QC_CDS_MODE);
+    const char *video_str = params.get(KEY_QC_VIDEO_CDS_MODE);
+    const char *video_prev_str = get(KEY_QC_VIDEO_CDS_MODE);
+    int32_t rc = NO_ERROR;
+
+    if (m_bRecordingHint_new == true) {
+        if (video_str) {
+            if ((video_prev_str == NULL) || (strcmp(video_str, video_prev_str) != 0)) {
+                int32_t cds_mode = lookupAttr(CDS_MODES_MAP, PARAM_MAP_SIZE(CDS_MODES_MAP),
+                        video_str);
+                if (cds_mode != NAME_NOT_FOUND) {
+                    updateParamEntry(KEY_QC_VIDEO_CDS_MODE, video_str);
+                    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_CDS_MODE, cds_mode)) {
+                        ALOGE("%s:Failed CDS MODE to update table", __func__);
+                        rc = BAD_VALUE;
+                    } else {
+                        CDBG("%s: Set CDS in video mode = %d", __func__, cds_mode);
+                        mCds_mode = cds_mode;
+                    }
+                } else {
+                    ALOGE("%s: Invalid argument for video CDS MODE %d", __func__,  cds_mode);
+                    rc = BAD_VALUE;
+                }
+            }
+        } else {
+            char video_prop[PROPERTY_VALUE_MAX];
+            memset(video_prop, 0, sizeof(video_prop));
+            property_get("persist.camera.video.CDS", video_prop, CDS_MODE_ON);
+            int32_t cds_mode = lookupAttr(CDS_MODES_MAP, PARAM_MAP_SIZE(CDS_MODES_MAP),
+                    video_prop);
+            if (cds_mode != NAME_NOT_FOUND) {
+                updateParamEntry(KEY_QC_VIDEO_CDS_MODE, video_prop);
+                if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_CDS_MODE, cds_mode)) {
+                    ALOGE("%s:Failed CDS MODE to update table", __func__);
+                    rc = BAD_VALUE;
+                } else {
+                    CDBG("%s: Set CDS in video mode from setprop = %d", __func__, cds_mode);
+                    mCds_mode = cds_mode;
+                }
+            } else {
+                ALOGE("%s: Invalid prop for video CDS MODE %d", __func__,  cds_mode);
+                rc = BAD_VALUE;
+            }
+        }
+    } else {
+        if (str) {
+            if ((prev_str == NULL) || (strcmp(str, prev_str) != 0)) {
+                int32_t cds_mode = lookupAttr(CDS_MODES_MAP, PARAM_MAP_SIZE(CDS_MODES_MAP),
+                        str);
+                if (cds_mode != NAME_NOT_FOUND) {
+                    updateParamEntry(KEY_QC_CDS_MODE, str);
+                    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_CDS_MODE, cds_mode)) {
+                        ALOGE("%s:Failed CDS MODE to update table", __func__);
+                        rc = BAD_VALUE;
+                    } else {
+                        CDBG("%s: Set CDS in capture mode = %d", __func__, cds_mode);
+                        mCds_mode = cds_mode;
+                    }
+                } else {
+                    ALOGE("%s: Invalid argument for snapshot CDS MODE %d", __func__,  cds_mode);
+                    rc = BAD_VALUE;
+                }
+            }
+        } else {
+            char prop[PROPERTY_VALUE_MAX];
+            memset(prop, 0, sizeof(prop));
+            property_get("persist.camera.CDS", prop, CDS_MODE_ON);
+            int32_t cds_mode = lookupAttr(CDS_MODES_MAP, PARAM_MAP_SIZE(CDS_MODES_MAP),
+                    prop);
+            if (cds_mode != NAME_NOT_FOUND) {
+                updateParamEntry(KEY_QC_CDS_MODE, prop);
+                if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_CDS_MODE, cds_mode)) {
+                    ALOGE("%s:Failed CDS MODE to update table", __func__);
+                    rc = BAD_VALUE;
+                } else {
+                    CDBG("%s: Set CDS in snapshot mode from setprop = %d", __func__, cds_mode);
+                    mCds_mode = cds_mode;
+                }
+            } else {
+                ALOGE("%s: Invalid prop for snapshot CDS MODE %d", __func__,  cds_mode);
+                rc = BAD_VALUE;
+            }
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setDISValue
+ *
+ * DESCRIPTION: set DIS value
+ *
+ * PARAMETERS :
+ *   @disStr : DIS value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setDISValue(const char *disStr)
+{
+    if (disStr != NULL) {
+        int32_t value = lookupAttr(ENABLE_DISABLE_MODES_MAP,
+                PARAM_MAP_SIZE(ENABLE_DISABLE_MODES_MAP), disStr);
+        if (value != NAME_NOT_FOUND) {
+            //For some IS types (like EIS 2.0), when DIS value is changed, we need to restart
+            //preview because of topology change in backend. But, for now, restart preview
+            //for all IS types.
+            m_bNeedRestart = true;
+            CDBG_HIGH("%s: Setting DIS value %s", __func__, disStr);
+            updateParamEntry(KEY_QC_DIS, disStr);
+            if (!(strcmp(disStr,"enable"))) {
+                m_bDISEnabled = true;
+            } else {
+                m_bDISEnabled = false;
+            }
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_DIS_ENABLE, value)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid DIS value: %s", (disStr == NULL) ? "NULL" : disStr);
+    m_bDISEnabled = false;
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : updateOisValue
+ *
+ * DESCRIPTION: update OIS value
+ *
+ * PARAMETERS :
+ *   @oisValue : OIS value TRUE/FALSE
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::updateOisValue(bool oisValue)
+{
+    uint8_t enable = 0;
+    int32_t rc = NO_ERROR;
+
+    // Check for OIS disable
+    char ois_prop[PROPERTY_VALUE_MAX];
+    memset(ois_prop, 0, sizeof(ois_prop));
+    property_get("persist.camera.ois.disable", ois_prop, "0");
+    uint8_t ois_disable = (uint8_t)atoi(ois_prop);
+
+    //Enable OIS if it is camera mode or Camcoder 4K mode
+    if (!m_bRecordingHint || (is4k2kVideoResolution() && m_bRecordingHint)) {
+        enable = 1;
+        CDBG_HIGH("%s: Valid OIS mode!! ", __func__);
+    }
+    // Disable OIS if setprop is set
+    if (ois_disable || !oisValue) {
+        //Disable OIS
+        enable = 0;
+        CDBG_HIGH("%s: Disable OIS mode!! ois_disable(%d) oisValue(%d)",
+                __func__, ois_disable, oisValue);
+
+    }
+    m_bOISEnabled = enable;
+    if (m_bOISEnabled) {
+        updateParamEntry(KEY_QC_OIS, VALUE_ENABLE);
+    } else {
+        updateParamEntry(KEY_QC_OIS, VALUE_DISABLE);
+    }
+
+    if (initBatchUpdate(m_pParamBuf) < 0 ) {
+        ALOGE("%s:Failed to initialize group update table", __func__);
+        return BAD_TYPE;
+    }
+
+    CDBG_HIGH("%s: Sending OIS mode (%d)", __func__, enable);
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_META_LENS_OPT_STAB_MODE, enable)) {
+        ALOGE("%s:Failed to update table", __func__);
+        return BAD_VALUE;
+    }
+
+    rc = commitSetBatch();
+    if (rc != NO_ERROR) {
+        ALOGE("%s:Failed to commit parameter changes", __func__);
+        return rc;
+    }
+
+    return rc;
+}
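
updateOisValue() keeps OIS on for still capture and for 4K recording, and forces it off when the caller passes false or the persist.camera.ois.disable property is set. A compact sketch of just that decision, with the property read replaced by a plain boolean parameter:

    #include <cstdio>

    // Sketch of the OIS decision above (illustrative, not HAL code).
    static bool oisEnabled(bool recordingHint, bool is4kVideo,
            bool oisDisableProp, bool oisValue) {
        bool enable = (!recordingHint || is4kVideo);   // camera mode or 4K camcorder
        if (oisDisableProp || !oisValue) {             // setprop / caller override wins
            enable = false;
        }
        return enable;
    }

    int main() {
        printf("camera mode:      %d\n", oisEnabled(false, false, false, true));  // 1
        printf("1080p recording:  %d\n", oisEnabled(true,  false, false, true));  // 0
        printf("4K recording:     %d\n", oisEnabled(true,  true,  false, true));  // 1
        printf("setprop disabled: %d\n", oisEnabled(false, false, true,  true));  // 0
        return 0;
    }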
+
+/*===========================================================================
+ * FUNCTION   : setHighFrameRate
+ *
+ * DESCRIPTION: set high frame rate
+ *
+ * PARAMETERS :
+ *   @hfrMode : HFR mode
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setHighFrameRate(const int32_t hfrMode)
+{
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_HFR, hfrMode)) {
+        return BAD_VALUE;
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setLensShadeValue
+ *
+ * DESCRIPTION: set lens shade value
+ *
+ * PARAMETERS :
+ *   @lensShadeStr : lens shade value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setLensShadeValue(const char *lensShadeStr)
+{
+    if (lensShadeStr != NULL) {
+        int32_t value = lookupAttr(ENABLE_DISABLE_MODES_MAP,
+                PARAM_MAP_SIZE(ENABLE_DISABLE_MODES_MAP), lensShadeStr);
+        if (value != NAME_NOT_FOUND) {
+            CDBG_HIGH("%s: Setting LensShade value %s", __func__, lensShadeStr);
+            updateParamEntry(KEY_QC_LENSSHADE, lensShadeStr);
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_ROLLOFF, value)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid LensShade value: %s",
+          (lensShadeStr == NULL) ? "NULL" : lensShadeStr);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setExposureCompensation
+ *
+ * DESCRIPTION: set exposure compensation value
+ *
+ * PARAMETERS :
+ *   @expComp : exposure compensation value
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setExposureCompensation(int expComp)
+{
+    char val[16];
+    snprintf(val, sizeof(val), "%d", expComp);
+    updateParamEntry(KEY_EXPOSURE_COMPENSATION, val);
+
+    // Don't need to pass step as part of setParameter because
+    // camera daemon is already aware of it.
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_EXPOSURE_COMPENSATION, expComp)) {
+        return BAD_VALUE;
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setWhiteBalance
+ *
+ * DESCRIPTION: set white balance mode
+ *
+ * PARAMETERS :
+ *   @wbStr   : white balance mode value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setWhiteBalance(const char *wbStr)
+{
+    if (wbStr != NULL) {
+        int32_t value = lookupAttr(WHITE_BALANCE_MODES_MAP,
+                PARAM_MAP_SIZE(WHITE_BALANCE_MODES_MAP), wbStr);
+        if (value != NAME_NOT_FOUND) {
+            CDBG_HIGH("%s: Setting WhiteBalance value %s", __func__, wbStr);
+            updateParamEntry(KEY_WHITE_BALANCE, wbStr);
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_WHITE_BALANCE, value)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid WhiteBalance value: %s", (wbStr == NULL) ? "NULL" : wbStr);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setWBManualCCT
+ *
+ * DESCRIPTION: set manual white balance color temperature (CCT)
+ *
+ * PARAMETERS :
+ *   @cctStr : string of wb cct, range (2000, 8000) in K.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t  QCameraParameters::setWBManualCCT(const char *cctStr)
+{
+    if (cctStr != NULL) {
+        int32_t cctVal = atoi(cctStr);
+        int32_t minCct = m_pCapability->min_wb_cct; /* 2000K */
+        int32_t maxCct = m_pCapability->max_wb_cct; /* 8000K */
+
+        if (cctVal >= minCct && cctVal <= maxCct) {
+            CDBG_HIGH("%s, cct value: %d", __func__, cctVal);
+            updateParamEntry(KEY_QC_WB_MANUAL_CCT, cctStr);
+            cam_manual_wb_parm_t manual_wb;
+            manual_wb.type = CAM_MANUAL_WB_MODE_CCT;
+            manual_wb.cct = cctVal;
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_WB_MANUAL, manual_wb)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+
+    ALOGE("Invalid cct, value: %s",
+            (cctStr == NULL) ? "NULL" : cctStr);
+    return BAD_VALUE;
+}
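
The manual-CCT path simply range-checks the parsed Kelvin value before batching it. A tiny sketch of that check, assuming the typical [2000, 8000] K bounds noted in the inline comments; the real limits come from m_pCapability->min_wb_cct and max_wb_cct.

    #include <cstdio>
    #include <cstdlib>

    // Sketch of the CCT range check above (illustrative only).
    static bool acceptCct(const char *cctStr, int minCct, int maxCct) {
        int cctVal = atoi(cctStr);
        return (cctVal >= minCct && cctVal <= maxCct);
    }

    int main() {
        printf("4500 K: %s\n", acceptCct("4500", 2000, 8000) ? "accepted" : "rejected");
        printf("9000 K: %s\n", acceptCct("9000", 2000, 8000) ? "accepted" : "rejected");
        return 0;
    }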
+
+/*===========================================================================
+ * FUNCTION   : updateAWBParams
+ *
+ * DESCRIPTION: update AWB parameter keys (CCT and WB gains)
+ *
+ * PARAMETERS :
+ *   @awb_params : WB parameters
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::updateAWBParams(cam_awb_params_t &awb_params)
+{
+    //check and update CCT
+    int32_t prev_cct = getInt(KEY_QC_WB_MANUAL_CCT);
+    if (prev_cct != awb_params.cct_value) {
+        CDBG("%s: update current cct value. old:%d, now:%d", __func__,
+                prev_cct, awb_params.cct_value);
+        set(KEY_QC_WB_MANUAL_CCT, awb_params.cct_value);
+    }
+
+    //check and update WB gains
+    const char *prev_gains = get(KEY_QC_MANUAL_WB_GAINS);
+    char gainStr[30];
+    snprintf(gainStr, sizeof(gainStr), "%f,%f,%f", awb_params.rgb_gains.r_gain,
+        awb_params.rgb_gains.g_gain, awb_params.rgb_gains.b_gain);
+
+    if (prev_gains == NULL || strcmp(prev_gains, gainStr)) {
+        set(KEY_QC_MANUAL_WB_GAINS, gainStr);
+        CDBG("%s: update current RGB gains: old %s new %s", __func__, prev_gains, gainStr);
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : parseGains
+ *
+ * DESCRIPTION: parse WB gains
+ *
+ * PARAMETERS :
+ *   @gainStr : WB result string
+ *   @r_gain  : WB red gain
+ *   @g_gain  : WB green gain
+ *   @b_gain  : WB blue gain
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::parseGains(const char *gainStr, float &r_gain,
+        float &g_gain, float &b_gain)
+{
+    int32_t rc = NO_ERROR;
+    char *saveptr = NULL;
+    char* gains = (char*) calloc(1, strlen(gainStr) + 1);
+    if (NULL == gains) {
+        ALOGE("%s: No memory for gains", __func__);
+        return NO_MEMORY;
+    }
+    strlcpy(gains, gainStr, strlen(gainStr) + 1);
+    char *token = strtok_r(gains, ",", &saveptr);
+
+    if (NULL != token) {
+        r_gain = atof(token);
+        token = strtok_r(NULL, ",", &saveptr);
+    }
+
+    if (NULL != token) {
+        g_gain = atof(token);
+        token = strtok_r(NULL, ",", &saveptr);
+    }
+
+    if (NULL != token) {
+        b_gain = (float) atof(token);
+    } else {
+        ALOGE("%s: Malformed string for gains", __func__);
+        rc = BAD_VALUE;
+    }
+
+    free(gains);
+    return rc;
+}
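
parseGains() walks an "r,g,b" string with strtok_r() and reports BAD_VALUE when fewer than three tokens are present. An equivalent standalone sketch of the same parsing (illustrative only, not the HAL routine):

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Splits "r,g,b" into three floats; returns false on a malformed string.
    static bool parseGainsSketch(const char *gainStr, float &r, float &g, float &b) {
        char buf[64];
        strncpy(buf, gainStr, sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';

        char *saveptr = NULL;
        char *token = strtok_r(buf, ",", &saveptr);
        if (token) { r = (float)atof(token); token = strtok_r(NULL, ",", &saveptr); }
        if (token) { g = (float)atof(token); token = strtok_r(NULL, ",", &saveptr); }
        if (token) { b = (float)atof(token); return true; }
        return false;                       // fewer than three tokens
    }

    int main() {
        float r, g, b;
        if (parseGainsSketch("1.8,1.0,2.4", r, g, b)) {
            printf("r=%.1f g=%.1f b=%.1f\n", r, g, b);   // r=1.8 g=1.0 b=2.4
        }
        return 0;
    }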
+
+/*===========================================================================
+ * FUNCTION   : setManualWBGains
+ *
+ * DESCRIPTION: set manual wb gains for r,g,b
+ *
+ * PARAMETERS :
+ *   @gainStr : string of wb gains, range (1.0, 4.0).
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setManualWBGains(const char *gainStr)
+{
+    int32_t rc = NO_ERROR;
+    if (gainStr != NULL) {
+        float r_gain, g_gain, b_gain;
+        rc = parseGains(gainStr, r_gain, g_gain, b_gain);
+        if (rc != NO_ERROR) {
+            return rc;
+        }
+
+        float minGain = m_pCapability->min_wb_gain;
+        float maxGain = m_pCapability->max_wb_gain;
+
+        if ((r_gain >= minGain) && (r_gain <= maxGain) &&
+                (g_gain >= minGain) && (g_gain <= maxGain) &&
+                (b_gain >= minGain) && (b_gain <= maxGain)) {
+            CDBG_HIGH("%s, setting rgb gains: %s", __func__, gainStr);
+            updateParamEntry(KEY_QC_MANUAL_WB_GAINS, gainStr);
+            cam_manual_wb_parm_t manual_wb;
+            manual_wb.type = CAM_MANUAL_WB_MODE_GAIN;
+            manual_wb.gains.r_gain = r_gain;
+            manual_wb.gains.g_gain = g_gain;
+            manual_wb.gains.b_gain = b_gain;
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_WB_MANUAL, manual_wb)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+
+    CDBG_HIGH("Invalid manual wb gains: %s",
+          (gainStr == NULL) ? "NULL" : gainStr);
+    return BAD_VALUE;
+}
+
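+/*===========================================================================
+ * FUNCTION   : getAutoFlickerMode
+ *
+ * DESCRIPTION: get the antibanding mode to use when auto is requested,
+ *              read from the persist.camera.set.afd property
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int type of antibanding mode
+ *==========================================================================*/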
+int QCameraParameters::getAutoFlickerMode()
+{
+    /* Enable advanced auto antibanding, where any of the following
+       options can be set:
+           CAM_ANTIBANDING_MODE_AUTO
+           CAM_ANTIBANDING_MODE_AUTO_50HZ
+           CAM_ANTIBANDING_MODE_AUTO_60HZ
+       Currently left at the default from persist.camera.set.afd. */
+    char prop[PROPERTY_VALUE_MAX];
+    memset(prop, 0, sizeof(prop));
+    property_get("persist.camera.set.afd", prop, "3");
+    return atoi(prop);
+}
+
+/*===========================================================================
+ * FUNCTION   : setAntibanding
+ *
+ * DESCRIPTION: set antibanding value
+ *
+ * PARAMETERS :
+ *   @antiBandingStr : antibanding value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setAntibanding(const char *antiBandingStr)
+{
+    if (antiBandingStr != NULL) {
+        int32_t value = lookupAttr(ANTIBANDING_MODES_MAP, PARAM_MAP_SIZE(ANTIBANDING_MODES_MAP),
+                antiBandingStr);
+        if (value != NAME_NOT_FOUND) {
+            CDBG_HIGH("%s: Setting AntiBanding value %s", __func__, antiBandingStr);
+            updateParamEntry(KEY_ANTIBANDING, antiBandingStr);
+            if(value == CAM_ANTIBANDING_MODE_AUTO) {
+               value = getAutoFlickerMode();
+            }
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf,
+                    CAM_INTF_PARM_ANTIBANDING, (uint32_t)value)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid AntiBanding value: %s",
+          (antiBandingStr == NULL) ? "NULL" : antiBandingStr);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setFocusAreas
+ *
+ * DESCRIPTION: set focus areas
+ *
+ * PARAMETERS :
+ *   @focusAreasStr : focus areas value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setFocusAreas(const char *focusAreasStr)
+{
+    if (m_pCapability->max_num_focus_areas == 0 ||
+        focusAreasStr == NULL) {
+        CDBG("%s: Parameter string is null", __func__);
+        return NO_ERROR;
+    }
+
+    cam_area_t *areas = (cam_area_t *)malloc(sizeof(cam_area_t) * m_pCapability->max_num_focus_areas);
+    if (NULL == areas) {
+        ALOGE("%s: No memory for areas", __func__);
+        return NO_MEMORY;
+    }
+    memset(areas, 0, sizeof(cam_area_t) * m_pCapability->max_num_focus_areas);
+    int num_areas_found = 0;
+    if (parseCameraAreaString(focusAreasStr,
+                              m_pCapability->max_num_focus_areas,
+                              areas,
+                              num_areas_found) != NO_ERROR) {
+        ALOGE("%s: Failed to parse the string: %s", __func__, focusAreasStr);
+        free(areas);
+        return BAD_VALUE;
+    }
+
+    if (validateCameraAreas(areas, num_areas_found) == false) {
+        ALOGE("%s: invalid areas specified : %s", __func__, focusAreasStr);
+        free(areas);
+        return BAD_VALUE;
+    }
+
+    updateParamEntry(KEY_FOCUS_AREAS, focusAreasStr);
+
+    //for special area string (0, 0, 0, 0, 0), set the num_areas_found to 0,
+    //so no action is taken by the lower layer
+    if (num_areas_found == 1 &&
+        areas[0].rect.left == 0 &&
+        areas[0].rect.top == 0 &&
+        areas[0].rect.width == 0 &&
+        areas[0].rect.height == 0 &&
+        areas[0].weight == 0) {
+        num_areas_found = 0;
+    }
+
+    int previewWidth, previewHeight;
+    getPreviewSize(&previewWidth, &previewHeight);
+    cam_roi_info_t af_roi_value;
+    memset(&af_roi_value, 0, sizeof(cam_roi_info_t));
+    af_roi_value.num_roi = (uint8_t)num_areas_found;
+    for (int i = 0; i < num_areas_found; i++) {
+        CDBG_HIGH("%s: FocusArea[%d] = (%d, %d, %d, %d)",
+              __func__, i, (areas[i].rect.top), (areas[i].rect.left),
+              (areas[i].rect.width), (areas[i].rect.height));
+
+        // Transform the coords from (-1000, 1000)
+        // to (0, previewWidth or previewHeight).
+        af_roi_value.roi[i].left =
+                (int32_t)(((double)areas[i].rect.left + 1000.0) *
+                    ((double)previewWidth / 2000.0));
+        af_roi_value.roi[i].top =
+                (int32_t)(((double)areas[i].rect.top + 1000.0) *
+                    ((double)previewHeight / 2000.0));
+        af_roi_value.roi[i].width =
+                (int32_t)((double)areas[i].rect.width *
+                    (double)previewWidth / 2000.0);
+        af_roi_value.roi[i].height =
+                (int32_t)((double)areas[i].rect.height *
+                    (double)previewHeight / 2000.0);
+        af_roi_value.weight[i] = areas[i].weight;
+    }
+    free(areas);
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_AF_ROI, af_roi_value)) {
+        return BAD_VALUE;
+    }
+
+    return NO_ERROR;
+}
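
The loop above maps Android's (-1000, 1000) focus-area coordinates onto preview pixels. A worked standalone sketch of the same transform, assuming a hypothetical 1920x1080 preview and a single hypothetical area at (left -500, top 0, width 250, height 250):

    #include <cstdio>

    int main() {
        // Android focus areas use a (-1000, -1000)..(1000, 1000) coordinate space;
        // the HAL maps them onto the preview frame, here assumed to be 1920x1080.
        const int previewWidth = 1920, previewHeight = 1080;
        const int left = -500, top = 0, width = 250, height = 250;   // hypothetical area

        int roiLeft   = (int)(((double)left + 1000.0) * ((double)previewWidth / 2000.0));
        int roiTop    = (int)(((double)top  + 1000.0) * ((double)previewHeight / 2000.0));
        int roiWidth  = (int)((double)width  * (double)previewWidth / 2000.0);
        int roiHeight = (int)((double)height * (double)previewHeight / 2000.0);

        // (-500, 0, 250, 250) -> (480, 540, 240, 135) on a 1920x1080 preview
        printf("AF ROI = (%d, %d, %d, %d)\n", roiLeft, roiTop, roiWidth, roiHeight);
        return 0;
    }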
+
+/*===========================================================================
+ * FUNCTION   : setMeteringAreas
+ *
+ * DESCRIPTION: set metering areas value
+ *
+ * PARAMETERS :
+ *   @meteringAreasStr : metering areas value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setMeteringAreas(const char *meteringAreasStr)
+{
+    if (m_pCapability->max_num_metering_areas == 0 ||
+        meteringAreasStr == NULL) {
+        CDBG("%s: Parameter string is null", __func__);
+        return NO_ERROR;
+    }
+
+    cam_area_t *areas = (cam_area_t *)malloc(sizeof(cam_area_t) * m_pCapability->max_num_metering_areas);
+    if (NULL == areas) {
+        ALOGE("%s: No memory for areas", __func__);
+        return NO_MEMORY;
+    }
+    memset(areas, 0, sizeof(cam_area_t) * m_pCapability->max_num_metering_areas);
+    int num_areas_found = 0;
+    if (parseCameraAreaString(meteringAreasStr,
+                              m_pCapability->max_num_metering_areas,
+                              areas,
+                              num_areas_found) < 0) {
+        ALOGE("%s: Failed to parse the string: %s", __func__, meteringAreasStr);
+        free(areas);
+        return BAD_VALUE;
+    }
+
+    if (validateCameraAreas(areas, num_areas_found) == false) {
+        ALOGE("%s: invalid areas specified : %s", __func__, meteringAreasStr);
+        free(areas);
+        return BAD_VALUE;
+    }
+
+    updateParamEntry(KEY_METERING_AREAS, meteringAreasStr);
+
+    //for special area string (0, 0, 0, 0, 0), set the num_areas_found to 0,
+    //so no action is taken by the lower layer
+    if (num_areas_found == 1 &&
+        areas[0].rect.left == 0 &&
+        areas[0].rect.top == 0 &&
+        areas[0].rect.width == 0 &&
+        areas[0].rect.height == 0 &&
+        areas[0].weight == 0) {
+        num_areas_found = 0;
+    }
+    cam_set_aec_roi_t aec_roi_value;
+    int previewWidth, previewHeight;
+    getPreviewSize(&previewWidth, &previewHeight);
+
+    memset(&aec_roi_value, 0, sizeof(cam_set_aec_roi_t));
+    if (num_areas_found > 0) {
+        aec_roi_value.aec_roi_enable = CAM_AEC_ROI_ON;
+        aec_roi_value.aec_roi_type = CAM_AEC_ROI_BY_COORDINATE;
+
+        for (int i = 0; i < num_areas_found; i++) {
+            CDBG_HIGH("%s: MeteringArea[%d] = (%d, %d, %d, %d)",
+                  __func__, i, (areas[i].rect.top), (areas[i].rect.left),
+                  (areas[i].rect.width), (areas[i].rect.height));
+
+            // Transform the coords from (-1000, 1000) to
+            // (0, previewWidth or previewHeight).
+            aec_roi_value.cam_aec_roi_position.coordinate[i].x =
+                    (uint32_t)((((double)areas[i].rect.left +
+                        (double)areas[i].rect.width / 2.0) + 1000.0) *
+                            (double)previewWidth / 2000.0);
+            aec_roi_value.cam_aec_roi_position.coordinate[i].y =
+                    (uint32_t)((((double)areas[i].rect.top +
+                        (double)areas[i].rect.height / 2.0) + 1000.0) *
+                            (double)previewHeight / 2000.0);
+        }
+    } else {
+        aec_roi_value.aec_roi_enable = CAM_AEC_ROI_OFF;
+    }
+    free(areas);
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_AEC_ROI, aec_roi_value)) {
+        return BAD_VALUE;
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setSceneMode
+ *
+ * DESCRIPTION: set scene mode
+ *
+ * PARAMETERS :
+ *   @sceneModeStr : scene mode value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setSceneMode(const char *sceneModeStr)
+{
+    if (sceneModeStr != NULL) {
+        int32_t value = lookupAttr(SCENE_MODES_MAP, PARAM_MAP_SIZE(SCENE_MODES_MAP), sceneModeStr);
+        if (value != NAME_NOT_FOUND) {
+            CDBG("%s: Setting SceneMode %s", __func__, sceneModeStr);
+            updateParamEntry(KEY_SCENE_MODE, sceneModeStr);
+            if (m_bSensorHDREnabled) {
+              // In case of HW HDR mode, do not also update the best shot mode.
+              CDBG_HIGH("%s: H/W HDR mode enabled. Do not set Best Shot Mode", __func__);
+              return NO_ERROR;
+            }
+            if (m_bSceneSelection) {
+                setSelectedScene((cam_scene_mode_type) value);
+            }
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_BESTSHOT_MODE,
+                    (uint32_t)value)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("%s: Invalid Scene Mode: %s",
+          __func__, (sceneModeStr == NULL) ? "NULL" : sceneModeStr);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setSelectableZoneAf
+ *
+ * DESCRIPTION: set selectable zone AF algorithm
+ *
+ * PARAMETERS :
+ *   @selZoneAFStr : selectable zone AF algorithm value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setSelectableZoneAf(const char *selZoneAFStr)
+{
+    if (selZoneAFStr != NULL) {
+        int32_t value = lookupAttr(FOCUS_ALGO_MAP, PARAM_MAP_SIZE(FOCUS_ALGO_MAP), selZoneAFStr);
+        if (value != NAME_NOT_FOUND) {
+            CDBG("%s: Setting Selectable Zone AF value %s", __func__, selZoneAFStr);
+            updateParamEntry(KEY_QC_SELECTABLE_ZONE_AF, selZoneAFStr);
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_FOCUS_ALGO_TYPE, value)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("%s: Invalid selectable zone af value: %s",
+          __func__, (selZoneAFStr == NULL) ? "NULL" : selZoneAFStr);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : isAEBracketEnabled
+ *
+ * DESCRIPTION: checks if AE bracketing is enabled
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : TRUE/FALSE
+ *==========================================================================*/
+bool QCameraParameters::isAEBracketEnabled()
+{
+    const char *str = get(KEY_QC_AE_BRACKET_HDR);
+    if (str != NULL) {
+        if (strcmp(str, AE_BRACKET_OFF) != 0) {
+            return true;
+        }
+    }
+    return false;
+}
+
+/*===========================================================================
+ * FUNCTION   : setAEBracket
+ *
+ * DESCRIPTION: set AE bracket value
+ *
+ * PARAMETERS :
+ *   @aecBracketStr : AE bracket value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setAEBracket(const char *aecBracketStr)
+{
+    if (aecBracketStr == NULL) {
+        CDBG("%s: setAEBracket with NULL value", __func__);
+        return NO_ERROR;
+    }
+
+    cam_exp_bracketing_t expBracket;
+    memset(&expBracket, 0, sizeof(expBracket));
+
+    int value = lookupAttr(BRACKETING_MODES_MAP, PARAM_MAP_SIZE(BRACKETING_MODES_MAP),
+            aecBracketStr);
+    switch (value) {
+    case CAM_EXP_BRACKETING_ON:
+        {
+            CDBG("%s, EXP_BRACKETING_ON", __func__);
+            const char *str_val = get(KEY_QC_CAPTURE_BURST_EXPOSURE);
+            if ((str_val != NULL) && (strlen(str_val)>0)) {
+                expBracket.mode = CAM_EXP_BRACKETING_ON;
+                m_bAeBracketingEnabled = true;
+                strlcpy(expBracket.values, str_val, MAX_EXP_BRACKETING_LENGTH);
+                CDBG("%s: setting Exposure Bracketing value of %s",
+                      __func__, expBracket.values);
+            }
+            else {
+                /* App did not set capture-burst-exposures; fall back to bracketing OFF mode */
+                CDBG("%s: capture-burst-exposures not set, back to HDR OFF mode", __func__);
+                m_bAeBracketingEnabled = false;
+                expBracket.mode = CAM_EXP_BRACKETING_OFF;
+            }
+        }
+        break;
+    default:
+        {
+            m_bAeBracketingEnabled = false;
+            CDBG_HIGH("%s, EXP_BRACKETING_OFF", __func__);
+            expBracket.mode = CAM_EXP_BRACKETING_OFF;
+        }
+        break;
+    }
+
+    // Cache client AE bracketing configuration
+    memcpy(&m_AEBracketingClient, &expBracket, sizeof(cam_exp_bracketing_t));
+
+    /* save the value */
+    updateParamEntry(KEY_QC_AE_BRACKET_HDR, aecBracketStr);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : set3ALock
+ *
+ * DESCRIPTION: enable/disable 3A lock.
+ *
+ * PARAMETERS :
+ *   @lockStr : lock value string.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::set3ALock(const char *lockStr)
+{
+    int32_t rc = NO_ERROR;
+    if (lockStr != NULL) {
+        int value = lookupAttr(TRUE_FALSE_MODES_MAP, PARAM_MAP_SIZE(TRUE_FALSE_MODES_MAP),
+                lockStr);
+        if (value != NAME_NOT_FOUND) {
+            CDBG_HIGH("%s: Setting Lock lockStr =%s", __func__, lockStr);
+            if(initBatchUpdate(m_pParamBuf) < 0 ) {
+                ALOGE("%s:Failed to initialize group update table", __func__);
+                return BAD_TYPE;
+            }
+            uint32_t focus_mode = CAM_FOCUS_MODE_AUTO;
+            if (value == 1) {
+                if (isUbiFocusEnabled() || isUbiRefocus()) {
+                    //For Ubi focus move focus to infinity.
+                    focus_mode = CAM_FOCUS_MODE_INFINITY;
+                } else if (isOptiZoomEnabled() || isStillMoreEnabled()) {
+                    //For optizoom and stillmore, set focus as fixed.
+                    focus_mode = CAM_FOCUS_MODE_FIXED;
+                }
+            } else {
+                // retrieve previous focus value.
+                const char *focus = get(KEY_FOCUS_MODE);
+                int val = lookupAttr(FOCUS_MODES_MAP, PARAM_MAP_SIZE(FOCUS_MODES_MAP), focus);
+                if (val != NAME_NOT_FOUND) {
+                    focus_mode = (uint32_t) val;
+                    CDBG("%s: focus mode %s", __func__, focus);
+                }
+            }
+            //Lock AWB
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_AWB_LOCK, (uint32_t)value)) {
+                return BAD_VALUE;
+            }
+            //Lock AEC
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_AEC_LOCK, (uint32_t)value)) {
+                return BAD_VALUE;
+            }
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_FOCUS_MODE, focus_mode)) {
+                return BAD_VALUE;
+            }
+
+            rc = commitSetBatch();
+            if (rc != NO_ERROR) {
+                ALOGE("%s:Failed to commit batch", __func__);
+            }
+        }
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setAndCommitZoom
+ *
+ * DESCRIPTION: set zoom.
+ *
+ * PARAMETERS :
+ *     @zoom_level : zoom level to set.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setAndCommitZoom(int zoom_level)
+{
+    CDBG_HIGH("%s: E",__func__);
+    int32_t rc = NO_ERROR;
+    if (initBatchUpdate(m_pParamBuf) < 0 ) {
+        ALOGE("%s:Failed to initialize group update table", __func__);
+        return BAD_TYPE;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_ZOOM, zoom_level)) {
+        ALOGE("%s:Failed to update table", __func__);
+        return BAD_VALUE;
+    }
+
+    rc = commitSetBatch();
+    if (rc != NO_ERROR) {
+        ALOGE("%s:Failed to commit zoom value", __func__);
+    }
+
+    mZoomLevel = zoom_level;
+    CDBG_HIGH("%s: X",__func__);
+
+    return rc;
+}
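+
+// Sketch (illustrative): setAndCommitZoom() above is the smallest instance of
+// the batch pattern used by the functions here that apply a parameter
+// immediately (e.g. set3ALock, commitAFBracket, setHDRAEBracket):
+//
+//   if (initBatchUpdate(m_pParamBuf) < 0) {            // 1. start a new batch
+//       return BAD_TYPE;
+//   }
+//   if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf,      // 2. queue the entry
+//           CAM_INTF_PARM_ZOOM, zoom_level)) {
+//       return BAD_VALUE;
+//   }
+//   rc = commitSetBatch();                             // 3. push to the daemon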
+
+/*===========================================================================
+ * FUNCTION   : isOptiZoomEnabled
+ *
+ * DESCRIPTION: checks whether optizoom is enabled
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : true - enabled, false - disabled
+ *
+ *==========================================================================*/
+bool QCameraParameters::isOptiZoomEnabled()
+{
+    if (m_bOptiZoomOn && (0 <= mParmZoomLevel)) {
+        uint32_t zoom_level = (uint32_t) mParmZoomLevel;
+        cam_opti_zoom_t *opti_zoom_settings_need =
+                &(m_pCapability->opti_zoom_settings_need);
+        uint32_t zoom_threshold = (uint32_t) opti_zoom_settings_need->zoom_threshold;
+        CDBG_HIGH("%s: current zoom level =%u & zoom_threshold =%u",
+                __func__, zoom_level, zoom_threshold);
+
+        if (zoom_level >= zoom_threshold) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+/*===========================================================================
+ * FUNCTION   : commitAFBracket
+ *
+ * DESCRIPTION: commit AF Bracket.
+ *
+ * PARAMETERS :
+ *   @afBracket : AF bracketing configuration
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::commitAFBracket(cam_af_bracketing_t afBracket)
+{
+
+    int32_t rc = NO_ERROR;
+    if(initBatchUpdate(m_pParamBuf) < 0 ) {
+        ALOGE("%s:Failed to initialize group update table", __func__);
+        return BAD_TYPE;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_FOCUS_BRACKETING, afBracket)) {
+        ALOGE("%s:Failed to update table", __func__);
+        return BAD_VALUE;
+    }
+
+    rc = commitSetBatch();
+    if (rc != NO_ERROR) {
+        ALOGE("%s:Failed to commit batch", __func__);
+        return rc;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : commitFlashBracket
+ *
+ * DESCRIPTION: commit Flash Bracket.
+ *
+ * PARAMETERS :
+ *   @flashBracket : flash bracketing configuration
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::commitFlashBracket(cam_flash_bracketing_t flashBracket)
+{
+    CDBG_HIGH("%s: E",__func__);
+    int32_t rc = NO_ERROR;
+    if(initBatchUpdate(m_pParamBuf) < 0 ) {
+        ALOGE("%s:Failed to initialize group update table", __func__);
+        return BAD_TYPE;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf,
+            CAM_INTF_PARM_FLASH_BRACKETING, flashBracket)) {
+        ALOGE("%s:Failed to update table", __func__);
+        return BAD_VALUE;
+    }
+
+    rc = commitSetBatch();
+    if (rc != NO_ERROR) {
+        ALOGE("%s:Failed to commit batch", __func__);
+    }
+
+    CDBG_HIGH("%s: X",__func__);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setAFBracket
+ *
+ * DESCRIPTION: set AF bracket value
+ *
+ * PARAMETERS :
+ *   @afBracketStr : AF bracket value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setAFBracket(const char *afBracketStr)
+{
+    CDBG_HIGH("%s: afBracketStr =%s",__func__,afBracketStr);
+
+    if(afBracketStr != NULL) {
+        int value = lookupAttr(AF_BRACKETING_MODES_MAP, PARAM_MAP_SIZE(AF_BRACKETING_MODES_MAP),
+                afBracketStr);
+        if (value != NAME_NOT_FOUND) {
+            m_bAFBracketingOn = (value != 0);
+            updateParamEntry(KEY_QC_AF_BRACKET, afBracketStr);
+
+            return NO_ERROR;
+        }
+    }
+
+    ALOGE("Invalid af bracket value: %s",
+        (afBracketStr == NULL) ? "NULL" : afBracketStr);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setReFocus
+ *
+ * DESCRIPTION: set refocus value
+ *
+ * PARAMETERS :
+ *   @reFocusStr : refocus value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setReFocus(const char *reFocusStr)
+{
+    CDBG_HIGH("%s: reFocusStr =%s",__func__,reFocusStr);
+
+    if (reFocusStr != NULL) {
+        int value = lookupAttr(RE_FOCUS_MODES_MAP, PARAM_MAP_SIZE(RE_FOCUS_MODES_MAP),
+                reFocusStr);
+        if (value != NAME_NOT_FOUND) {
+            m_bReFocusOn = (value != 0);
+            updateParamEntry(KEY_QC_RE_FOCUS, reFocusStr);
+            return NO_ERROR;
+        }
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setChromaFlash
+ *
+ * DESCRIPTION: set chroma flash value
+ *
+ * PARAMETERS :
+ *   @chromaFlashStr : chroma flash value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setChromaFlash(const char *chromaFlashStr)
+{
+    CDBG_HIGH("%s: chromaFlashStr =%s",__func__,chromaFlashStr);
+    if(chromaFlashStr != NULL) {
+        int value = lookupAttr(CHROMA_FLASH_MODES_MAP, PARAM_MAP_SIZE(CHROMA_FLASH_MODES_MAP),
+                chromaFlashStr);
+        if(value != NAME_NOT_FOUND) {
+            m_bChromaFlashOn = (value != 0);
+            updateParamEntry(KEY_QC_CHROMA_FLASH, chromaFlashStr);
+
+            return NO_ERROR;
+        }
+    }
+
+    ALOGE("Invalid chroma flash value: %s",
+        (chromaFlashStr == NULL) ? "NULL" : chromaFlashStr);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setOptiZoom
+ *
+ * DESCRIPTION: set opti zoom value
+ *
+ * PARAMETERS :
+ *   @optiZoomStr : opti zoom value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setOptiZoom(const char *optiZoomStr)
+{
+    CDBG_HIGH("%s: optiZoomStr =%s",__func__,optiZoomStr);
+    if(optiZoomStr != NULL) {
+        int value = lookupAttr(OPTI_ZOOM_MODES_MAP, PARAM_MAP_SIZE(OPTI_ZOOM_MODES_MAP),
+                optiZoomStr);
+        if(value != NAME_NOT_FOUND) {
+            m_bOptiZoomOn = (value != 0);
+            updateParamEntry(KEY_QC_OPTI_ZOOM, optiZoomStr);
+
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid opti zoom value: %s",
+        (optiZoomStr == NULL) ? "NULL" : optiZoomStr);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setTruePortrait
+ *
+ * DESCRIPTION: set true portrait value
+ *
+ * PARAMETERS :
+ *   @truePortraitStr : true portrait value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setTruePortrait(const char *truePortraitStr)
+{
+    CDBG_HIGH("%s: truePortraitStr =%s", __func__, truePortraitStr);
+    if (truePortraitStr != NULL) {
+        int value = lookupAttr(TRUE_PORTRAIT_MODES_MAP,
+                PARAM_MAP_SIZE(TRUE_PORTRAIT_MODES_MAP),
+                truePortraitStr);
+        if (value != NAME_NOT_FOUND) {
+            m_bTruePortraitOn = (value != 0);
+            updateParamEntry(KEY_QC_TRUE_PORTRAIT, truePortraitStr);
+            return NO_ERROR;
+        }
+    }
+    CDBG_HIGH("Invalid true portrait value: %s",
+            (truePortraitStr == NULL) ? "NULL" : truePortraitStr);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setHDRMode
+ *
+ * DESCRIPTION: set hdr mode value
+ *
+ * PARAMETERS :
+ *   @hdrModeStr : hdr mode value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setHDRMode(const char *hdrModeStr)
+{
+    CDBG_HIGH("%s: hdrModeStr =%s", __func__, hdrModeStr);
+    if (hdrModeStr != NULL) {
+        int value = lookupAttr(HDR_MODES_MAP, PARAM_MAP_SIZE(HDR_MODES_MAP), hdrModeStr);
+        if (value != NAME_NOT_FOUND) {
+            const char *str = get(KEY_SCENE_MODE);
+
+            m_bHDRModeSensor = !strncmp(hdrModeStr, HDR_MODE_SENSOR, strlen(HDR_MODE_SENSOR));
+
+            updateParamEntry(KEY_QC_HDR_MODE, hdrModeStr);
+
+            // If hdr is already selected, need to deselect it in local cache
+            // So the new hdr mode will be applied
+            if (str && !strncmp(str, SCENE_MODE_HDR, strlen(SCENE_MODE_HDR))) {
+                updateParamEntry(KEY_SCENE_MODE, SCENE_MODE_AUTO);
+                m_bNeedRestart = true;
+            }
+
+            return NO_ERROR;
+        }
+    }
+    CDBG_HIGH("Invalid hdr mode value: %s",
+            (hdrModeStr == NULL) ? "NULL" : hdrModeStr);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setSeeMore
+ *
+ * DESCRIPTION: set see more value
+ *
+ * PARAMETERS :
+ *   @seeMoreStr : see more value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setSeeMore(const char *seeMoreStr)
+{
+    int32_t rc = NO_ERROR;
+
+    CDBG_HIGH("%s: seeMoreStr =%s", __func__, seeMoreStr);
+    if (seeMoreStr != NULL) {
+        int value = lookupAttr(ON_OFF_MODES_MAP,
+                PARAM_MAP_SIZE(ON_OFF_MODES_MAP),
+                seeMoreStr);
+        if (value != NAME_NOT_FOUND) {
+            m_bSeeMoreOn = (value != 0);
+
+            // If SeeMore is enabled, enable StillMore for live snapshot
+            // and disable tone map
+            if (m_bSeeMoreOn) {
+                m_bStillMoreOn = TRUE;
+                rc = setToneMapMode(false, false);
+                if (rc != NO_ERROR) {
+                    CDBG_HIGH("%s: Failed to disable tone map during SeeMore", __func__);
+                }
+            } else {
+                m_bStillMoreOn = FALSE;
+                rc = setToneMapMode(true, false);
+                if (rc != NO_ERROR) {
+                    CDBG_HIGH("%s: Failed to enable tone map during SeeMore", __func__);
+                }
+            }
+            updateParamEntry(KEY_QC_SEE_MORE, seeMoreStr);
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid see more value: %s",
+            (seeMoreStr == NULL) ? "NULL" : seeMoreStr);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setStillMore
+ *
+ * DESCRIPTION: set still more value
+ *
+ * PARAMETERS :
+ *   @stillMoreStr : still more value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setStillMore(const char *stillMoreStr)
+{
+    CDBG_HIGH("%s: stillMoreStr =%s", __func__, stillMoreStr);
+    if (stillMoreStr != NULL) {
+        int value = lookupAttr(STILL_MORE_MODES_MAP, PARAM_MAP_SIZE(STILL_MORE_MODES_MAP),
+                stillMoreStr);
+        if (value != NAME_NOT_FOUND) {
+            m_bStillMoreOn = (value != 0);
+            updateParamEntry(KEY_QC_STILL_MORE, stillMoreStr);
+
+            return NO_ERROR;
+        }
+    }
+    ALOGE("Invalid still more value: %s",
+            (stillMoreStr == NULL) ? "NULL" : stillMoreStr);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setHDRNeed1x
+ *
+ * DESCRIPTION: set hdr need 1x value
+ *
+ * PARAMETERS :
+ *   @hdrNeed1xStr : hdr need 1x value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setHDRNeed1x(const char *hdrNeed1xStr)
+{
+    CDBG_HIGH("%s: hdrNeed1xStr =%s", __func__, hdrNeed1xStr);
+    if (hdrNeed1xStr != NULL) {
+        int value = lookupAttr(TRUE_FALSE_MODES_MAP, PARAM_MAP_SIZE(TRUE_FALSE_MODES_MAP),
+                hdrNeed1xStr);
+        if (value != NAME_NOT_FOUND) {
+            updateParamEntry(KEY_QC_HDR_NEED_1X, hdrNeed1xStr);
+            m_bHDR1xFrameEnabled = !strncmp(hdrNeed1xStr, VALUE_TRUE, strlen(VALUE_TRUE));
+            m_bNeedRestart = true;
+
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_HDR_NEED_1X,
+                    m_bHDR1xFrameEnabled)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+
+    CDBG_HIGH("Invalid hdr need 1x value: %s",
+            (hdrNeed1xStr == NULL) ? "NULL" : hdrNeed1xStr);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setAEBracketing
+ *
+ * DESCRIPTION: enables AE bracketing
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setAEBracketing()
+{
+    int32_t rc = NO_ERROR;
+    if(initBatchUpdate(m_pParamBuf) < 0 ) {
+        ALOGE("%s:Failed to initialize group update table", __func__);
+        return BAD_TYPE;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_HDR, m_AEBracketingClient)) {
+        ALOGE("%s:Failed to update AE bracketing", __func__);
+        return BAD_VALUE;
+    }
+
+    rc = commitSetBatch();
+    if (rc != NO_ERROR) {
+        ALOGE("%s:Failed to configure AE bracketing", __func__);
+        return rc;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setHDRAEBracket
+ *
+ * DESCRIPTION: enables AE bracketing for HDR
+ *
+ * PARAMETERS :
+ *   @hdrBracket : HDR bracketing configuration
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setHDRAEBracket(cam_exp_bracketing_t hdrBracket)
+{
+    int32_t rc = NO_ERROR;
+    if(initBatchUpdate(m_pParamBuf) < 0 ) {
+        ALOGE("%s:Failed to initialize group update table", __func__);
+        return BAD_TYPE;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_HDR, hdrBracket)) {
+        ALOGE("%s:Failed to update table", __func__);
+        return BAD_TYPE;
+    }
+
+    rc = commitSetBatch();
+    if (rc != NO_ERROR) {
+        ALOGE("%s:Failed to configure HDR bracketing", __func__);
+        return rc;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : stopAEBracket
+ *
+ * DESCRIPTION: stops AE bracketing by setting the bracketing mode to OFF
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::stopAEBracket()
+{
+    cam_exp_bracketing_t bracketing;
+
+    memset(&bracketing, 0, sizeof(bracketing));
+    bracketing.mode = CAM_EXP_BRACKETING_OFF;
+
+    return setHDRAEBracket(bracketing);
+}
+
+/*===========================================================================
+ * FUNCTION   : updateFlash
+ *
+ * DESCRIPTION: restores client flash configuration or disables flash
+ *
+ * PARAMETERS :
+ *   @commitSettings : flag indicating whether settings need to be committed
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::updateFlash(bool commitSettings)
+{
+    int32_t rc = NO_ERROR;
+    int32_t value;
+
+    if (commitSettings) {
+      if(initBatchUpdate(m_pParamBuf) < 0 ) {
+          ALOGE("%s:Failed to initialize group update table", __func__);
+          return BAD_TYPE;
+      }
+    }
+
+    if (isHDREnabled() || m_bAeBracketingEnabled || m_bAFBracketingOn ||
+          m_bOptiZoomOn || m_bReFocusOn) {
+        value = CAM_FLASH_MODE_OFF;
+    } else if (m_bChromaFlashOn) {
+        value = CAM_FLASH_MODE_ON;
+    } else {
+        value = mFlashValue;
+    }
+
+    if (value != mFlashDaemonValue) {
+        CDBG("%s: Setting Flash value %d", __func__, value);
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_LED_MODE, value)) {
+            ALOGE("%s:Failed to set led mode", __func__);
+            return BAD_VALUE;
+        }
+        mFlashDaemonValue = value;
+    } else {
+        rc = NO_ERROR;
+    }
+
+    if (commitSettings) {
+        rc = commitSetBatch();
+        if (rc != NO_ERROR) {
+            ALOGE("%s:Failed to commit flash settings", __func__);
+            return rc;
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setRedeyeReduction
+ *
+ * DESCRIPTION: set red eye reduction value
+ *
+ * PARAMETERS :
+ *   @redeyeStr : red eye reduction value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setRedeyeReduction(const char *redeyeStr)
+{
+    if (redeyeStr != NULL) {
+        int32_t value = lookupAttr(ENABLE_DISABLE_MODES_MAP,
+                PARAM_MAP_SIZE(ENABLE_DISABLE_MODES_MAP), redeyeStr);
+        if (value != NAME_NOT_FOUND) {
+            CDBG("%s: Setting RedEye Reduce value %s", __func__, redeyeStr);
+            updateParamEntry(KEY_QC_REDEYE_REDUCTION, redeyeStr);
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf,
+                    CAM_INTF_PARM_REDEYE_REDUCTION, value)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("%s: Invalid RedEye Reduce value: %s",
+          __func__, (redeyeStr == NULL) ? "NULL" : redeyeStr);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : getDenoiseProcessPlate
+ *
+ * DESCRIPTION: query denoise process plate
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : NR process plate value
+ *==========================================================================*/
+cam_denoise_process_type_t
+        QCameraParameters::getDenoiseProcessPlate(cam_intf_parm_type_t type)
+{
+    char prop[PROPERTY_VALUE_MAX];
+    memset(prop, 0, sizeof(prop));
+    cam_denoise_process_type_t processPlate = CAM_WAVELET_DENOISE_CBCR_ONLY;
+    if (CAM_INTF_PARM_WAVELET_DENOISE == type) {
+        property_get("persist.denoise.process.plates", prop, "");
+    } else if (CAM_INTF_PARM_TEMPORAL_DENOISE == type) {
+        property_get("persist.tnr.process.plates", prop, "");
+    } else {
+        ALOGE("%s: Type not supported", __func__);
+        prop[0] = '\0';
+    }
+    if (strlen(prop) > 0) {
+        switch(atoi(prop)) {
+        case 0:
+            processPlate = CAM_WAVELET_DENOISE_YCBCR_PLANE;
+            break;
+        case 1:
+            processPlate = CAM_WAVELET_DENOISE_CBCR_ONLY;
+            break;
+        case 2:
+            processPlate = CAM_WAVELET_DENOISE_STREAMLINE_YCBCR;
+            break;
+        case 3:
+            processPlate = CAM_WAVELET_DENOISE_STREAMLINED_CBCR;
+            break;
+        default:
+            processPlate = CAM_WAVELET_DENOISE_CBCR_ONLY;
+            break;
+        }
+    }
+    return processPlate;
+}
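+
+// Usage note (illustrative): the process plate can be overridden at runtime via
+// the persist properties read above, e.g. from an adb shell:
+//
+//   adb shell setprop persist.denoise.process.plates 2   // wavelet denoise
+//   adb shell setprop persist.tnr.process.plates 0       // temporal denoise
+//
+// where 0=YCBCR_PLANE, 1=CBCR_ONLY, 2=STREAMLINE_YCBCR, 3=STREAMLINED_CBCR;
+// any other value, or an unset property, falls back to CBCR_ONLY.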
+
+/*===========================================================================
+ * FUNCTION   : setWaveletDenoise
+ *
+ * DESCRIPTION: set wavelet denoise value
+ *
+ * PARAMETERS :
+ *   @wnrStr : wavelet denoise value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setWaveletDenoise(const char *wnrStr)
+{
+    if ((m_pCapability->qcom_supported_feature_mask & CAM_QCOM_FEATURE_DENOISE2D) == 0){
+        CDBG_HIGH("%s: WNR is not supported",__func__);
+        return NO_ERROR;
+    }
+
+    if (wnrStr != NULL) {
+        int value = lookupAttr(DENOISE_ON_OFF_MODES_MAP,
+                PARAM_MAP_SIZE(DENOISE_ON_OFF_MODES_MAP), wnrStr);
+        if (value != NAME_NOT_FOUND) {
+            updateParamEntry(KEY_QC_DENOISE, wnrStr);
+
+            cam_denoise_param_t temp;
+            memset(&temp, 0, sizeof(temp));
+            temp.denoise_enable = (uint8_t)value;
+            m_bWNROn = (value != 0);
+            if (m_bWNROn) {
+                temp.process_plates = getDenoiseProcessPlate(CAM_INTF_PARM_WAVELET_DENOISE);
+            }
+            CDBG("%s: Denoise enable=%d, plates=%d",
+                  __func__, temp.denoise_enable, temp.process_plates);
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_WAVELET_DENOISE, temp)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("%s: Invalid Denoise value: %s", __func__, (wnrStr == NULL) ? "NULL" : wnrStr);
+    return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setRdiMode
+ *
+ * DESCRIPTION: set rdi mode value
+ *
+ * PARAMETERS :
+ *   @str     : rdi mode value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setRdiMode(const char *str)
+{
+    CDBG("RDI_DEBUG %s: rdi mode value: %s", __func__, str);
+
+    if (str != NULL) {
+        int32_t value = lookupAttr(ENABLE_DISABLE_MODES_MAP,
+                PARAM_MAP_SIZE(ENABLE_DISABLE_MODES_MAP), str);
+        if (value != NAME_NOT_FOUND) {
+            updateParamEntry(KEY_QC_RDI_MODE, str);
+            m_bRdiMode = (value == 0) ? false : true;
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_RDI_MODE, value)) {
+                return BAD_VALUE;
+            }
+            return NO_ERROR;
+        }
+    }
+    ALOGE("%s: Invalid rdi mode value: %s", __func__, (str == NULL) ? "NULL" : str);
+    return BAD_VALUE;
+}
+
+
+/*===========================================================================
+ * FUNCTION   : setSecureMode
+ *
+ * DESCRIPTION: set secure mode value
+ *
+ * PARAMETERS :
+ *   @str     : secure mode value string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setSecureMode(const char *str)
+{
+  ALOGD("%s: Secure mode value: %s", __func__, str);
+
+  if (str != NULL) {
+    int32_t value = lookupAttr(ENABLE_DISABLE_MODES_MAP,
+            PARAM_MAP_SIZE(ENABLE_DISABLE_MODES_MAP), str);
+    if (value != NAME_NOT_FOUND) {
+        updateParamEntry(KEY_QC_SECURE_MODE, str);
+        m_bSecureMode = (value == 0)? false : true;
+        return NO_ERROR;
+    }
+  }
+  ALOGE("%s: Invalid Secure mode value: %s",
+    __func__, (str == NULL) ? "NULL" : str);
+  return BAD_VALUE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setPreviewFrameRateMode
+ *
+ * DESCRIPTION: set preview frame rate mode
+ *
+ * PARAMETERS :
+ *   @mode    : preview frame rate mode
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraParameters::setPreviewFrameRateMode(const char *mode)
+{
+    set(KEY_QC_PREVIEW_FRAME_RATE_MODE, mode);
+}
+
+/*===========================================================================
+ * FUNCTION   : getPreviewFrameRateMode
+ *
+ * DESCRIPTION: get preview frame rate mode
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : preview frame rate mode string
+ *==========================================================================*/
+const char *QCameraParameters::getPreviewFrameRateMode() const
+{
+    return get(KEY_QC_PREVIEW_FRAME_RATE_MODE);
+}
+
+/*===========================================================================
+ * FUNCTION   : setTouchIndexAec
+ *
+ * DESCRIPTION: set touch index AEC
+ *
+ * PARAMETERS :
+ *   @x,y     : touch coordinates for AEC
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraParameters::setTouchIndexAec(int x, int y)
+{
+    char str[32];
+    snprintf(str, sizeof(str), "%dx%d", x, y);
+    set(KEY_QC_TOUCH_INDEX_AEC, str);
+}
+
+/*===========================================================================
+ * FUNCTION   : getTouchIndexAec
+ *
+ * DESCRIPTION: get touch index AEC
+ *
+ * PARAMETERS :
+ *   @x,y     : [output] touch coordinates for AEC
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraParameters::getTouchIndexAec(int *x, int *y)
+{
+    *x = -1;
+    *y = -1;
+
+    // Get the current string, if it doesn't exist, leave the -1x-1
+    const char *p = get(KEY_QC_TOUCH_INDEX_AEC);
+    if (p == 0)
+        return;
+
+    int tempX, tempY;
+    if (parse_pair(p, &tempX, &tempY, 'x') == 0) {
+        *x = tempX;
+        *y = tempY;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : setTouchIndexAf
+ *
+ * DESCRIPTION: set touch index AF
+ *
+ * PARAMETERS :
+ *   @x,y     : touch coordinates for AF
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraParameters::setTouchIndexAf(int x, int y)
+{
+    char str[32];
+    snprintf(str, sizeof(str), "%dx%d", x, y);
+    set(KEY_QC_TOUCH_INDEX_AF, str);
+}
+
+/*===========================================================================
+ * FUNCTION   : getTouchIndexAf
+ *
+ * DESCRIPTION: get touch index AF
+ *
+ * PARAMETERS :
+ *   @x,y     : [output] touch coordinates for AF
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraParameters::getTouchIndexAf(int *x, int *y)
+{
+    *x = -1;
+    *y = -1;
+
+    // Get the current string, if it doesn't exist, leave the -1x-1
+    const char *p = get(KEY_QC_TOUCH_INDEX_AF);
+    if (p == 0)
+        return;
+
+    int tempX, tempY;
+    if (parse_pair(p, &tempX, &tempY, 'x') == 0) {
+        *x = tempX;
+        *y = tempY;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getStreamRotation
+ *
+ * DESCRIPTION: get stream rotation and swap the stream dimensions if needed,
+ *              based on the video rotation parameter
+ *
+ * PARAMETERS :
+ *   @streamType    : [input] stream type
+ *   @featureConfig : [output] post-proc feature config with rotation set
+ *   @dim           : [input/output] stream dimension, swapped for 90/270
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::getStreamRotation(cam_stream_type_t streamType,
+                                            cam_pp_feature_config_t &featureConfig,
+                                            cam_dimension_t &dim)
+{
+    int32_t ret = NO_ERROR;
+    const char *str = get(KEY_QC_VIDEO_ROTATION);
+    int rotationParam = lookupAttr(VIDEO_ROTATION_MODES_MAP,
+            PARAM_MAP_SIZE(VIDEO_ROTATION_MODES_MAP), str);
+    featureConfig.rotation = ROTATE_0;
+    int swapDim = 0;
+    switch (streamType) {
+        case CAM_STREAM_TYPE_VIDEO:
+            switch(rotationParam) {
+                case 90:
+                    featureConfig.feature_mask |= CAM_QCOM_FEATURE_ROTATION;
+                    featureConfig.rotation = ROTATE_90;
+                    swapDim = 1;
+                    break;
+                case 180:
+                    featureConfig.feature_mask |= CAM_QCOM_FEATURE_ROTATION;
+                    featureConfig.rotation = ROTATE_180;
+                    break;
+                case 270:
+                    featureConfig.feature_mask |= CAM_QCOM_FEATURE_ROTATION;
+                    featureConfig.rotation = ROTATE_270;
+                    swapDim = 1;
+                    break;
+                default:
+                    featureConfig.rotation = ROTATE_0;
+            }
+            break;
+        case CAM_STREAM_TYPE_PREVIEW:
+        case CAM_STREAM_TYPE_POSTVIEW:
+        case CAM_STREAM_TYPE_SNAPSHOT:
+        case CAM_STREAM_TYPE_RAW:
+        case CAM_STREAM_TYPE_METADATA:
+        case CAM_STREAM_TYPE_OFFLINE_PROC:
+        case CAM_STREAM_TYPE_DEFAULT:
+        default:
+            break;
+    }
+
+    if (swapDim > 0) {
+        int w = 0;
+        w = dim.width;
+        dim.width = dim.height;
+        dim.height = w;
+    }
+    return ret;
+}
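+
+// Worked example (illustrative, hypothetical values): with KEY_QC_VIDEO_ROTATION
+// set to "90", the video stream gets CAM_QCOM_FEATURE_ROTATION with ROTATE_90
+// and its dimensions are swapped so the rotated output geometry is correct:
+//
+//   cam_dimension_t dim = { 1920, 1080 };   // incoming video dimension
+//   // after getStreamRotation(CAM_STREAM_TYPE_VIDEO, cfg, dim):
+//   //   cfg.rotation == ROTATE_90, dim == { 1080, 1920 }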
+
+/*===========================================================================
+ * FUNCTION   : getStreamFormat
+ *
+ * DESCRIPTION: get stream format by its type
+ *
+ * PARAMETERS :
+ *   @streamType : [input] stream type
+ *   @format     : [output] stream format
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::getStreamFormat(cam_stream_type_t streamType,
+                                            cam_format_t &format)
+{
+    int32_t ret = NO_ERROR;
+
+    format = CAM_FORMAT_MAX;
+    switch (streamType) {
+    case CAM_STREAM_TYPE_PREVIEW:
+    case CAM_STREAM_TYPE_POSTVIEW:
+    case CAM_STREAM_TYPE_CALLBACK:
+        format = mPreviewFormat;
+        break;
+    case CAM_STREAM_TYPE_ANALYSIS:
+        if (m_pCapability->analysis_recommended_format ==
+                CAM_FORMAT_Y_ONLY) {
+            format = m_pCapability->analysis_recommended_format;
+        } else {
+            ALOGE("%s: invalid analysis_recommended_format %d\n",
+                    __func__, m_pCapability->analysis_recommended_format);
+            format = mPreviewFormat;
+        }
+        break;
+    case CAM_STREAM_TYPE_SNAPSHOT:
+        if ( mPictureFormat == CAM_FORMAT_YUV_422_NV16 ) {
+            format = CAM_FORMAT_YUV_422_NV16;
+        } else {
+            char prop[PROPERTY_VALUE_MAX];
+            int snapshotFormat;
+            memset(prop, 0, sizeof(prop));
+            property_get("persist.camera.snap.format", prop, "0");
+            snapshotFormat = atoi(prop);
+            if(snapshotFormat == 1) {
+                format = CAM_FORMAT_YUV_422_NV61;
+            } else {
+                format = CAM_FORMAT_YUV_420_NV21;
+            }
+        }
+        break;
+    case CAM_STREAM_TYPE_VIDEO:
+        format = CAM_FORMAT_YUV_420_NV12;
+        break;
+    case CAM_STREAM_TYPE_RAW:
+        if (isRdiMode()) {
+            format = m_pCapability->rdi_mode_stream_fmt;
+        } else if (mPictureFormat >= CAM_FORMAT_YUV_RAW_8BIT_YUYV) {
+            format = (cam_format_t)mPictureFormat;
+        } else if (getofflineRAW()) {
+            format = CAM_FORMAT_BAYER_MIPI_RAW_10BPP_BGGR;
+        } else {
+            char raw_format[PROPERTY_VALUE_MAX];
+            int rawFormat;
+            memset(raw_format, 0, sizeof(raw_format));
+            /*Default value is CAM_FORMAT_BAYER_QCOM_RAW_10BPP_GBRG*/
+            property_get("persist.camera.raw.format", raw_format, "16");
+            rawFormat = atoi(raw_format);
+            format = (cam_format_t)rawFormat;
+            CDBG_HIGH("%s: Raw stream format %d bundled with snapshot",
+                   __func__, format);
+        }
+        break;
+    case CAM_STREAM_TYPE_METADATA:
+    case CAM_STREAM_TYPE_OFFLINE_PROC:
+    case CAM_STREAM_TYPE_DEFAULT:
+    default:
+        break;
+    }
+
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : getFlipMode
+ *
+ * DESCRIPTION: get flip mode
+ *
+ * PARAMETERS :
+ *   @type    : [input] stream type
+ *
+ * RETURN     : int type of flip mode
+ *              0 - no flip
+ *              1 - FLIP_H
+ *              2 - FLIP_V
+ *              3 - FLIP_H | FLIP_V
+ *==========================================================================*/
+int QCameraParameters::getFlipMode(cam_stream_type_t type)
+{
+    const char *str = NULL;
+    int flipMode = 0; // no flip
+
+    switch(type){
+    case CAM_STREAM_TYPE_PREVIEW:
+        if (!isRdiMode()) {
+            str = get(KEY_QC_PREVIEW_FLIP);
+        }
+        break;
+    case CAM_STREAM_TYPE_VIDEO:
+        str = get(KEY_QC_VIDEO_FLIP);
+        break;
+    case CAM_STREAM_TYPE_SNAPSHOT:
+    case CAM_STREAM_TYPE_POSTVIEW:
+        str = get(KEY_QC_SNAPSHOT_PICTURE_FLIP);
+        break;
+    default:
+        CDBG("%s: No flip mode for stream type %d", __func__, type);
+        break;
+    }
+
+    if (str != NULL) {
+        //Map the flip mode string to the corresponding flip value
+        int value = lookupAttr(FLIP_MODES_MAP, PARAM_MAP_SIZE(FLIP_MODES_MAP), str);
+        if (value != NAME_NOT_FOUND) {
+            flipMode = value;
+        }
+    }
+
+    CDBG_HIGH("%s: the flip mode of stream type %d is %d.", __func__, type, flipMode);
+    return flipMode;
+}
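+
+// Note (illustrative): per the header above, the return value is a bitmask
+// (FLIP_H = 1, FLIP_V = 2), so both flips can be requested at once:
+//
+//   int flip = getFlipMode(CAM_STREAM_TYPE_VIDEO);
+//   bool flipH = (flip & 1) != 0;
+//   bool flipV = (flip & 2) != 0;   // flip == 3 means FLIP_H | FLIP_V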
+
+/*===========================================================================
+ * FUNCTION   : isSnapshotFDNeeded
+ *
+ * DESCRIPTION: check whether Face Detection Metadata is needed
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : bool type of status
+ *              true  - needed
+ *              false - not needed
+ *==========================================================================*/
+bool QCameraParameters::isSnapshotFDNeeded()
+{
+    return getInt(KEY_QC_SNAPSHOT_FD_DATA);
+}
+
+/*===========================================================================
+ * FUNCTION   : getStreamDimension
+ *
+ * DESCRIPTION: get stream dimension by its type
+ *
+ * PARAMETERS :
+ *   @streamType : [input] stream type
+ *   @dim        : [output] stream dimension
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::getStreamDimension(cam_stream_type_t streamType,
+                                               cam_dimension_t &dim)
+{
+    int32_t ret = NO_ERROR;
+    memset(&dim, 0, sizeof(cam_dimension_t));
+
+    switch (streamType) {
+    case CAM_STREAM_TYPE_PREVIEW:
+        getPreviewSize(&dim.width, &dim.height);
+        break;
+    case CAM_STREAM_TYPE_POSTVIEW:
+        getPreviewSize(&dim.width, &dim.height);
+        break;
+    case CAM_STREAM_TYPE_SNAPSHOT:
+        if (getRecordingHintValue() == true) {
+            // live snapshot
+            getLiveSnapshotSize(dim);
+        } else {
+            getPictureSize(&dim.width, &dim.height);
+        }
+        break;
+    case CAM_STREAM_TYPE_VIDEO:
+        getVideoSize(&dim.width, &dim.height);
+        break;
+    case CAM_STREAM_TYPE_RAW:
+        //dim = m_pCapability->raw_dim;
+        getRawSize(dim);
+        break;
+    case CAM_STREAM_TYPE_METADATA:
+        dim.width = (int32_t)sizeof(metadata_buffer_t);
+        dim.height = 1;
+        break;
+    case CAM_STREAM_TYPE_OFFLINE_PROC:
+        break;
+    case CAM_STREAM_TYPE_ANALYSIS:
+        cam_dimension_t prv_dim, max_dim;
+
+        /* Analysis stream need aspect ratio as preview stream */
+        getPreviewSize(&prv_dim.width, &prv_dim.height);
+
+        max_dim.width = m_pCapability->analysis_max_res.width;
+        max_dim.height = m_pCapability->analysis_max_res.height;
+
+        if (prv_dim.width > max_dim.width || prv_dim.height > max_dim.height) {
+            double max_ratio, requested_ratio;
+
+            max_ratio = (double)max_dim.width / (double)max_dim.height;
+            requested_ratio = (double)prv_dim.width / (double)prv_dim.height;
+
+            if (max_ratio < requested_ratio) {
+                dim.width = max_dim.width;
+                dim.height = (int32_t)((double)dim.width / requested_ratio);
+            } else {
+                dim.height = max_dim.height;
+                dim.width = (int32_t)((double)max_dim.height * requested_ratio);
+            }
+            dim.width &= ~0x1;
+            dim.height &= ~0x1;
+        } else {
+            dim.width = prv_dim.width;
+            dim.height = prv_dim.height;
+        }
+        break;
+    case CAM_STREAM_TYPE_DEFAULT:
+    default:
+        ALOGE("%s: no dimension for unsupported stream type %d",
+              __func__, streamType);
+        ret = BAD_VALUE;
+        break;
+    }
+    return ret;
+}
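+
+// Worked example (illustrative, hypothetical values): the ANALYSIS case above
+// keeps the preview aspect ratio while clamping to analysis_max_res. For a
+// 4000x3000 (4:3) preview with a 1280x720 (16:9) analysis limit:
+//
+//   max_ratio       = 1280.0 / 720.0;    // ~1.78
+//   requested_ratio = 4000.0 / 3000.0;   // ~1.33
+//   // max_ratio >= requested_ratio -> dim.height = 720 and
+//   // dim.width = (int32_t)(720 * requested_ratio) ~= 960, after which both
+//   // values are masked down to even numbers.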
+
+/*===========================================================================
+ * FUNCTION   : getPreviewHalPixelFormat
+ *
+ * DESCRIPTION: get preview HAL pixel format
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : HAL pixel format
+ *==========================================================================*/
+int QCameraParameters::getPreviewHalPixelFormat() const
+{
+    int32_t halPixelFormat;
+
+    switch (mPreviewFormat) {
+    case CAM_FORMAT_YUV_420_NV12:
+        halPixelFormat = HAL_PIXEL_FORMAT_YCbCr_420_SP;
+        break;
+    case CAM_FORMAT_YUV_420_NV21:
+        halPixelFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP;
+        break;
+    case CAM_FORMAT_YUV_420_NV21_ADRENO:
+        halPixelFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO;
+        break;
+    case CAM_FORMAT_YUV_420_YV12:
+        halPixelFormat = HAL_PIXEL_FORMAT_YV12;
+        break;
+    case CAM_FORMAT_YUV_420_NV12_VENUS:
+        halPixelFormat = HAL_PIXEL_FORMAT_YCbCr_420_SP_VENUS;
+        break;
+    case CAM_FORMAT_YUV_422_NV16:
+    case CAM_FORMAT_YUV_422_NV61:
+    default:
+        halPixelFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP;
+        break;
+    }
+    CDBG_HIGH("%s: format %d\n", __func__, halPixelFormat);
+    return halPixelFormat;
+}
+
+/*===========================================================================
+ * FUNCTION   : getThumbnailSize
+ *
+ * DESCRIPTION: get thumbnail size
+ *
+ * PARAMETERS :
+ *   @width, height : [output] thumbnail width and height
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraParameters::getThumbnailSize(int *width, int *height) const
+{
+    *width = getInt(KEY_JPEG_THUMBNAIL_WIDTH);
+    *height = getInt(KEY_JPEG_THUMBNAIL_HEIGHT);
+}
+
+/*===========================================================================
+ * FUNCTION   : getZSLBurstInterval
+ *
+ * DESCRIPTION: get ZSL burst interval setting
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : ZSL burst interval value
+ *==========================================================================*/
+uint8_t QCameraParameters::getZSLBurstInterval()
+{
+    int interval = getInt(KEY_QC_ZSL_BURST_INTERVAL);
+    if (interval < 0) {
+        interval = 1;
+    }
+    return (uint8_t)interval;
+}
+
+/*===========================================================================
+ * FUNCTION   : getZSLQueueDepth
+ *
+ * DESCRIPTION: get ZSL queue depth
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : ZSL queue depth value
+ *==========================================================================*/
+uint8_t QCameraParameters::getZSLQueueDepth()
+{
+    int qdepth = getInt(KEY_QC_ZSL_QUEUE_DEPTH);
+    if (qdepth < 0) {
+        qdepth = 2;
+    }
+    return (uint8_t)qdepth;
+}
+
+/*===========================================================================
+ * FUNCTION   : getZSLBackLookCount
+ *
+ * DESCRIPTION: get ZSL backlook count setting
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : ZSL backlook count value
+ *==========================================================================*/
+uint8_t QCameraParameters::getZSLBackLookCount()
+{
+    int look_back = getInt(KEY_QC_ZSL_BURST_LOOKBACK);
+    if (look_back < 0) {
+        look_back = 2;
+    }
+    return (uint8_t)look_back;
+}
+
+/*===========================================================================
+ * FUNCTION   : getMaxUnmatchedFramesInQueue
+ *
+ * DESCRIPTION: get allowed maximum number of unmatched frames in the ZSL queue
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : maximum number of unmatched frames allowed in the queue
+ *==========================================================================*/
+uint8_t QCameraParameters::getMaxUnmatchedFramesInQueue()
+{
+    return (uint8_t)(m_pCapability->min_num_pp_bufs + (m_nBurstNum / 10));
+}
+
+/*===========================================================================
+ * FUNCTION   : setRecordingHintValue
+ *
+ * DESCRIPTION: set recording hint
+ *
+ * PARAMETERS :
+ *   @value   : video hint value
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int QCameraParameters::setRecordingHintValue(int32_t value)
+{
+    CDBG_HIGH("%s: VideoHint = %d", __func__, value);
+    bool newValue = (value > 0)? true : false;
+
+    if ( m_bRecordingHint != newValue ) {
+        m_bNeedRestart = true;
+        m_bRecordingHint_new = newValue;
+    } else {
+        m_bRecordingHint_new = m_bRecordingHint;
+    }
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_RECORDING_HINT, value)) {
+        return BAD_VALUE;
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : getNumOfSnapshots
+ *
+ * DESCRIPTION: get number of snapshots per shutter
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : number of snapshots per shutter
+ *==========================================================================*/
+uint8_t QCameraParameters::getNumOfSnapshots()
+{
+    uint8_t numOfSnapshot = 1;
+    int val = getInt(KEY_QC_NUM_SNAPSHOT_PER_SHUTTER);
+    if (0 < val) {
+        numOfSnapshot = (uint8_t)val;
+    }
+
+    return (uint8_t)numOfSnapshot;
+}
+
+/*===========================================================================
+ * FUNCTION   : getBurstCountForAdvancedCapture
+ *
+ * DESCRIPTION: get burst count for advanced capture.
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : number of snapshot required for advanced capture.
+ *==========================================================================*/
+uint8_t QCameraParameters::getBurstCountForAdvancedCapture()
+{
+    uint32_t burstCount = 0;
+    if (isUbiFocusEnabled()) {
+        //number of snapshots required for Ubi Focus.
+        burstCount = m_pCapability->ubifocus_af_bracketing_need.burst_count;
+    } else if (isUbiRefocus()) {
+        //number of snapshots required for Ubi Refocus.
+        burstCount = m_pCapability->refocus_af_bracketing_need.burst_count;
+    } else if (isOptiZoomEnabled()) {
+        //number of snapshots required for Opti Zoom.
+        burstCount = m_pCapability->opti_zoom_settings_need.burst_count;
+    } else if (isChromaFlashEnabled()) {
+        //number of snapshots required for Chroma Flash.
+        burstCount = m_pCapability->chroma_flash_settings_need.burst_count;
+    } else if (isStillMoreEnabled()) {
+        //number of snapshots required for Still More.
+        if (isSeeMoreEnabled()) {
+            burstCount = 1;
+        } else if ((m_stillmore_config.burst_count >=
+                m_pCapability->stillmore_settings_need.min_burst_count) &&
+                (m_stillmore_config.burst_count <=
+                m_pCapability->stillmore_settings_need.max_burst_count)) {
+            burstCount = m_stillmore_config.burst_count;
+        } else {
+            burstCount = m_pCapability->stillmore_settings_need.burst_count;
+        }
+    } else if (isHDREnabled()) {
+        //number of snapshots required for HDR.
+        burstCount = m_pCapability->hdr_bracketing_setting.num_frames;
+    } else if (isAEBracketEnabled()) {
+      burstCount = 0;
+      const char *str_val = m_AEBracketingClient.values;
+      if ((str_val != NULL) && (strlen(str_val) > 0)) {
+          char prop[PROPERTY_VALUE_MAX];
+          memset(prop, 0, sizeof(prop));
+          strlcpy(prop, str_val, PROPERTY_VALUE_MAX);
+          char *saveptr = NULL;
+          char *token = strtok_r(prop, ",", &saveptr);
+          while (token != NULL) {
+              token = strtok_r(NULL, ",", &saveptr);
+              burstCount++;
+          }
+      }
+    }
+    if (burstCount <= 0) {
+        burstCount = 1;
+    }
+
+    CDBG_HIGH("%s: Snapshot burst count = %d", __func__, burstCount);
+    return (uint8_t)burstCount;
+}
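+
+// Worked example (illustrative, hypothetical value): for AE bracketing the
+// burst count is simply the number of comma-separated exposure values cached
+// from KEY_QC_CAPTURE_BURST_EXPOSURE. With "0,-6,6":
+//
+//   strtok_r() yields "0", "-6", "6", then NULL, so the loop above increments
+//   burstCount three times and 3 snapshots are requested for the burst.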
+
+/*===========================================================================
+ * FUNCTION   : getNumOfRetroSnapshots
+ *
+ * DESCRIPTION: get number of retro active snapshots per shutter
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : number of retro active snapshots per shutter
+ *==========================================================================*/
+uint8_t QCameraParameters::getNumOfRetroSnapshots()
+{
+    int numOfRetroSnapshots = getInt(KEY_QC_NUM_RETRO_BURST_PER_SHUTTER);
+    if (numOfRetroSnapshots < 0) {
+        numOfRetroSnapshots = 0;
+    }
+    CDBG_HIGH("%s: [ZSL Retro] : numOfRetroSnaps - %d", __func__, numOfRetroSnapshots);
+    return (uint8_t)numOfRetroSnapshots;
+}
+
+/*===========================================================================
+ * FUNCTION   : getBurstLEDOnPeriod
+ *
+ * DESCRIPTION: get burst LED on period
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : burst LED on period
+ *==========================================================================*/
+int QCameraParameters::getBurstLEDOnPeriod()
+{
+  CDBG_HIGH("%s: [ZSL Retro] burst LED ON period: %d", __func__, m_nBurstLEDOnPeriod);
+  return m_nBurstLEDOnPeriod;
+}
+
+/*===========================================================================
+ * FUNCTION   : getNumOfExtraHDRInBufsIfNeeded
+ *
+ * DESCRIPTION: get number of extra input buffers needed by HDR
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : number of extra buffers needed by HDR; 0 if not HDR enabled
+ *==========================================================================*/
+uint8_t QCameraParameters::getNumOfExtraHDRInBufsIfNeeded()
+{
+    unsigned int numOfBufs = 0;
+
+    if (isHDREnabled()) {
+        numOfBufs += m_pCapability->hdr_bracketing_setting.num_frames;
+        if (isHDR1xFrameEnabled() && isHDR1xExtraBufferNeeded()) {
+            numOfBufs++;
+        }
+        numOfBufs--; // Only additional buffers need to be returned
+    }
+
+    return (uint8_t)(numOfBufs * getBurstNum());
+}
+
+/*===========================================================================
+ * FUNCTION   : getNumOfExtraHDROutBufsIfNeeded
+ *
+ * DESCRIPTION: get number of extra output buffers needed by HDR
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : number of extra buffers needed by HDR; 0 if not HDR enabled
+ *==========================================================================*/
+uint8_t QCameraParameters::getNumOfExtraHDROutBufsIfNeeded()
+{
+    int numOfBufs = 0;
+
+    if (isHDREnabled() && isHDR1xFrameEnabled()) {
+        numOfBufs++;
+    }
+
+    return (uint8_t)(numOfBufs * getBurstNum());
+}
+
+/*===========================================================================
+ * FUNCTION   : getBurstNum
+ *
+ * DESCRIPTION: get burst number of snapshot
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : number of burst
+ *==========================================================================*/
+uint8_t QCameraParameters::getBurstNum()
+{
+    CDBG_HIGH("%s: m_nBurstNum = %d", __func__, m_nBurstNum);
+    return m_nBurstNum;
+}
+
+/*===========================================================================
+ * FUNCTION   : getJpegQuality
+ *
+ * DESCRIPTION: get jpeg encoding quality
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : jpeg encoding quality
+ *==========================================================================*/
+uint32_t QCameraParameters::getJpegQuality()
+{
+    int quality = getInt(KEY_JPEG_QUALITY);
+    if (quality < 0) {
+        quality = 85; // set to default quality value
+    }
+    return (uint32_t)quality;
+}
+
+/*===========================================================================
+ * FUNCTION   : getRotation
+ *
+ * DESCRIPTION: get application configured rotation
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : rotation value
+ *==========================================================================*/
+uint32_t QCameraParameters::getRotation() {
+    int rotation = 0;
+
+    //If exif rotation is set, do not rotate captured image
+    if (!useJpegExifRotation()) {
+        rotation = mRotation;
+        if (rotation < 0) {
+            rotation = 0;
+        }
+    }
+    return (uint32_t)rotation;
+}
+
+/*===========================================================================
+ * FUNCTION   : setJpegRotation
+ *
+ * DESCRIPTION: set jpeg rotation value configured internally
+ *
+ * PARAMETERS :
+ *   @rotation : jpeg rotation value (0, 90, 180 or 270)
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraParameters::setJpegRotation(int rotation) {
+    if (rotation == 0 || rotation == 90 ||
+            rotation == 180 || rotation == 270) {
+        mJpegRotation = (uint32_t)rotation;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getDeviceRotation
+ *
+ * DESCRIPTION: get device rotation value
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : device rotation value
+ *==========================================================================*/
+uint32_t QCameraParameters::getDeviceRotation() {
+    int rotation = 0;
+
+    rotation = mRotation;
+    if (rotation < 0) {
+        rotation = 0;
+    }
+
+    return (uint32_t)rotation;
+}
+
+/*===========================================================================
+ * FUNCTION   : getJpegExifRotation
+ *
+ * DESCRIPTION: get exif rotation value
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : rotation value
+ *==========================================================================*/
+uint32_t QCameraParameters::getJpegExifRotation() {
+    int rotation = 0;
+
+    if (useJpegExifRotation()) {
+        rotation = mRotation;
+        if (rotation < 0) {
+            rotation = 0;
+        }
+    }
+    return (uint32_t)rotation;
+}
+
+/*===========================================================================
+ * FUNCTION   : useJpegExifRotation
+ *
+ * DESCRIPTION: Check if jpeg exif rotation needs to be used
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : true if jpeg exif rotation needs to be used
+ *==========================================================================*/
+bool QCameraParameters::useJpegExifRotation() {
+    char exifRotation[PROPERTY_VALUE_MAX];
+
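+    // When persist.camera.exif.rotation is "on", rotation is recorded in the EXIF
+    // orientation tag instead of being applied to the captured image
+    // (see getRotation() and getJpegExifRotation()).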
+    property_get("persist.camera.exif.rotation", exifRotation, "off");
+    if (!strcmp(exifRotation, "on")) {
+        return true;
+    }
+    return false;
+}
+
+/*===========================================================================
+ * FUNCTION   : getEffectValue
+ *
+ * DESCRIPTION: get effect value
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : effect value
+ *==========================================================================*/
+int32_t QCameraParameters::getEffectValue()
+{
+    uint32_t cnt = 0;
+    const char *effect = get(KEY_EFFECT);
+    if (effect) {
+        while (NULL != EFFECT_MODES_MAP[cnt].desc) {
+            if (!strcmp(EFFECT_MODES_MAP[cnt].desc, effect)) {
+                return EFFECT_MODES_MAP[cnt].val;
+            }
+            cnt++;
+        }
+    } else {
+        ALOGE("%s: Missing effect value", __func__);
+    }
+    return CAM_EFFECT_MODE_OFF;
+}
+
+/*===========================================================================
+ * FUNCTION   : parseGPSCoordinate
+ *
+ * DESCRIPTION: parse GPS coordinate string
+ *
+ * PARAMETERS :
+ *   @coord_str : [input] coordinate string
+ *   @coord     : [output]  ptr to struct to store coordinate
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int QCameraParameters::parseGPSCoordinate(const char *coord_str, rat_t* coord)
+{
+    if(coord == NULL) {
+        ALOGE("%s: error, invalid argument coord == NULL", __func__);
+        return BAD_VALUE;
+    }
+    double degF = atof(coord_str);
+    if (degF < 0) {
+        degF = -degF;
+    }
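+    // Convert decimal degrees into degrees/minutes/seconds rationals;
+    // seconds are stored with 1/10000 precision.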
+    double minF = (degF - (double)(int) degF) * 60.0;
+    double secF = (minF - (double)(int) minF) * 60.0;
+
+    getRational(&coord[0], (int)degF, 1);
+    getRational(&coord[1], (int)minF, 1);
+    getRational(&coord[2], (int)(secF * 10000.0), 10000);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : getExifDateTime
+ *
+ * DESCRIPTION: query exif date time
+ *
+ * PARAMETERS :
+ *   @dateTime    : String to store exif date time.
+ *                  Left unchanged in case of error.
+ *   @subsecTime  : String to store exif sub-second time (microseconds).
+ *                  Left unchanged in case of error.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::getExifDateTime(String8 &dateTime, String8 &subsecTime)
+{
+    int32_t ret = NO_ERROR;
+
+    //get time and date from system
+    struct timeval tv;
+    struct tm timeinfo_data;
+
+    int res = gettimeofday(&tv, NULL);
+    if (0 == res) {
+        struct tm *timeinfo = localtime_r(&tv.tv_sec, &timeinfo_data);
+        if (NULL != timeinfo) {
+            //Write datetime according to EXIF Spec
+            //"YYYY:MM:DD HH:MM:SS" (20 chars including \0)
+            dateTime = String8::format("%04d:%02d:%02d %02d:%02d:%02d",
+                    timeinfo->tm_year + 1900, timeinfo->tm_mon + 1,
+                    timeinfo->tm_mday, timeinfo->tm_hour,
+                    timeinfo->tm_min, timeinfo->tm_sec);
+            //Write subsec according to EXIF Spec
+            subsecTime = String8::format("%06ld", tv.tv_usec);
+        } else {
+            ALOGE("%s: localtime_r() error", __func__);
+            ret = UNKNOWN_ERROR;
+        }
+    } else if (-1 == res) {
+        ALOGE("%s: gettimeofday() error: %s", __func__, strerror(errno));
+        ret = UNKNOWN_ERROR;
+    } else {
+        ALOGE("%s: gettimeofday() unexpected return code: %d", __func__, res);
+        ret = UNKNOWN_ERROR;
+    }
+
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : getRational
+ *
+ * DESCRIPTION: compose rational struct
+ *
+ * PARAMETERS :
+ *   @rat     : ptr to struct to store rational info
+ *   @num     :num of the rational
+ *   @denom   : denom of the rational
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::getRational(rat_t *rat, int num, int denom)
+{
+    if ((0 > num) || (0 > denom)) {
+        ALOGE("%s: Negative values", __func__);
+        return BAD_VALUE;
+    }
+    if (NULL == rat) {
+        ALOGE("%s: NULL rat input", __func__);
+        return BAD_VALUE;
+    }
+    rat->num = (uint32_t)num;
+    rat->denom = (uint32_t)denom;
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : getExifFocalLength
+ *
+ * DESCRIPTION: get exif focal length
+ *
+ * PARAMETERS :
+ *   @focalLength : ptr to rational struct to store focal length
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::getExifFocalLength(rat_t *focalLength)
+{
+    int focalLengthValue =
+        (int)(getFloat(QCameraParameters::KEY_FOCAL_LENGTH) * FOCAL_LENGTH_DECIMAL_PRECISION);
+    return getRational(focalLength, focalLengthValue, FOCAL_LENGTH_DECIMAL_PRECISION);
+}
+
+/*===========================================================================
+ * FUNCTION   : getExifIsoSpeed
+ *
+ * DESCRIPTION: get exif ISO speed
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : ISO speed value
+ *==========================================================================*/
+uint16_t QCameraParameters::getExifIsoSpeed()
+{
+    uint16_t isoSpeed = 0;
+    const char *iso_str = get(QCameraParameters::KEY_QC_ISO_MODE);
+    int iso_index = lookupAttr(ISO_MODES_MAP, PARAM_MAP_SIZE(ISO_MODES_MAP), iso_str);
+    switch (iso_index) {
+    case CAM_ISO_MODE_AUTO:
+        isoSpeed = 0;
+        break;
+    case CAM_ISO_MODE_DEBLUR:
+        isoSpeed = 1;
+        break;
+    case CAM_ISO_MODE_100:
+        isoSpeed = 100;
+        break;
+    case CAM_ISO_MODE_200:
+        isoSpeed = 200;
+        break;
+    case CAM_ISO_MODE_400:
+        isoSpeed = 400;
+        break;
+    case CAM_ISO_MODE_800:
+        isoSpeed = 800;
+        break;
+    case CAM_ISO_MODE_1600:
+        isoSpeed = 1600;
+        break;
+    case CAM_ISO_MODE_3200:
+        isoSpeed = 3200;
+        break;
+    }
+    return isoSpeed;
+}
+
+/*===========================================================================
+ * FUNCTION   : getExifGpsProcessingMethod
+ *
+ * DESCRIPTION: get GPS processing method
+ *
+ * PARAMETERS :
+ *   @gpsProcessingMethod : string to store GPS process method
+ *   @count               : length of the string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::getExifGpsProcessingMethod(char *gpsProcessingMethod,
+                                                      uint32_t &count)
+{
+    const char *str = get(KEY_GPS_PROCESSING_METHOD);
+    if(str != NULL) {
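+        // The GPSProcessingMethod tag is prefixed with an ASCII character-code
+        // header before the method string itself.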
+        memcpy(gpsProcessingMethod, ExifAsciiPrefix, EXIF_ASCII_PREFIX_SIZE);
+        count = EXIF_ASCII_PREFIX_SIZE;
+        strlcpy(gpsProcessingMethod + EXIF_ASCII_PREFIX_SIZE, str, strlen(str)+1);
+        count += (uint32_t)strlen(str);
+        gpsProcessingMethod[count++] = '\0'; // increase 1 for the last NULL char
+        return NO_ERROR;
+    } else {
+        return BAD_VALUE;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getExifLatitude
+ *
+ * DESCRIPTION: get exif latitude
+ *
+ * PARAMETERS :
+ *   @latitude : ptr to rational struct to store latitude info
+ *   @latRef   : character to indicate latitude reference
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::getExifLatitude(rat_t *latitude,
+                                           char *latRef)
+{
+    const char *str = get(KEY_GPS_LATITUDE);
+    if(str != NULL) {
+        parseGPSCoordinate(str, latitude);
+
+        //set Latitude Ref
+        float latitudeValue = getFloat(KEY_GPS_LATITUDE);
+        if(latitudeValue < 0.0f) {
+            latRef[0] = 'S';
+        } else {
+            latRef[0] = 'N';
+        }
+        latRef[1] = '\0';
+        return NO_ERROR;
+    }else{
+        return BAD_VALUE;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getExifLongitude
+ *
+ * DESCRIPTION: get exif longitude
+ *
+ * PARAMETERS :
+ *   @longitude : ptr to rational struct to store longitude info
+ *   @lonRef    : character to indicate longitude reference
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::getExifLongitude(rat_t *longitude,
+                                            char *lonRef)
+{
+    const char *str = get(KEY_GPS_LONGITUDE);
+    if(str != NULL) {
+        parseGPSCoordinate(str, longitude);
+
+        //set Longitude Ref
+        float longitudeValue = getFloat(KEY_GPS_LONGITUDE);
+        if(longitudeValue < 0.0f) {
+            lonRef[0] = 'W';
+        } else {
+            lonRef[0] = 'E';
+        }
+        lonRef[1] = '\0';
+        return NO_ERROR;
+    }else{
+        return BAD_VALUE;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getExifAltitude
+ *
+ * DESCRIPTION: get exif altitude
+ *
+ * PARAMETERS :
+ *   @altitude : ptr to rational struct to store altitude info
+ *   @altRef   : character to indicate altitude reference
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::getExifAltitude(rat_t *altitude,
+                                           char *altRef)
+{
+    const char *str = get(KEY_GPS_ALTITUDE);
+    if(str != NULL) {
+        double value = atof(str);
+        *altRef = 0;
+        if(value < 0){
+            *altRef = 1;
+            value = -value;
+        }
+        return getRational(altitude, (int)(value*1000), 1000);
+    }else{
+        return BAD_VALUE;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getExifGpsDateTimeStamp
+ *
+ * DESCRIPTION: get exif GPS date time stamp
+ *
+ * PARAMETERS :
+ *   @gpsDateStamp : GPS date time stamp string
+ *   @bufLen       : length of the string
+ *   @gpsTimeStamp : ptr to rational struct to store time stamp info
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::getExifGpsDateTimeStamp(char *gpsDateStamp,
+                                                   uint32_t bufLen,
+                                                   rat_t *gpsTimeStamp)
+{
+    const char *str = get(KEY_GPS_TIMESTAMP);
+    if(str != NULL) {
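+        // The GPS timestamp parameter is seconds since the epoch; EXIF stores it as
+        // a UTC date string plus hour/minute/second rationals.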
+        time_t unixTime = (time_t)atol(str);
+        struct tm *UTCTimestamp = gmtime(&unixTime);
+
+        if(!UTCTimestamp) {
+            ALOGE("%s: UTCTimestamp is null\n", __func__);
+            return BAD_VALUE;
+        }
+
+        strftime(gpsDateStamp, bufLen, "%Y:%m:%d", UTCTimestamp);
+
+        getRational(&gpsTimeStamp[0], UTCTimestamp->tm_hour, 1);
+        getRational(&gpsTimeStamp[1], UTCTimestamp->tm_min, 1);
+        getRational(&gpsTimeStamp[2], UTCTimestamp->tm_sec, 1);
+
+        return NO_ERROR;
+    } else {
+        return BAD_VALUE;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : updateFocusDistances
+ *
+ * DESCRIPTION: update focus distances
+ *
+ * PARAMETERS :
+ *   @focusDistances : ptr to focus distance info
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::updateFocusDistances(cam_focus_distances_info_t *focusDistances)
+{
+    String8 str;
+    char buffer[32] = {0};
+    //set all distances to infinity if focus mode is infinity
+    if(mFocusMode == CAM_FOCUS_MODE_INFINITY) {
+        str.append("Infinity,Infinity,Infinity");
+    } else {
+        snprintf(buffer, sizeof(buffer), "%f", focusDistances->focus_distance[0]);
+        str.append(buffer);
+        snprintf(buffer, sizeof(buffer), ",%f", focusDistances->focus_distance[1]);
+        str.append(buffer);
+        snprintf(buffer, sizeof(buffer), ",%f", focusDistances->focus_distance[2]);
+        str.append(buffer);
+    }
+    CDBG_HIGH("%s: setting KEY_FOCUS_DISTANCES as %s", __FUNCTION__, str.string());
+    set(QCameraParameters::KEY_FOCUS_DISTANCES, str.string());
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : updateRecordingHintValue
+ *
+ * DESCRIPTION: update recording hint locally and to daemon
+ *
+ * PARAMETERS :
+ *   @value   : video hint value
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::updateRecordingHintValue(int32_t value)
+{
+    int32_t rc = NO_ERROR;
+    if(initBatchUpdate(m_pParamBuf) < 0 ) {
+        ALOGE("%s:Failed to initialize group update table", __func__);
+        return BAD_TYPE;
+    }
+
+    rc = setRecordingHintValue(value);
+    if (rc != NO_ERROR) {
+        ALOGE("%s:Failed to update table", __func__);
+        return rc;
+    }
+
+    if(m_bDISEnabled && (value==1)) {
+        CDBG_HIGH("%s: %d: Setting DIS value again!!", __func__, __LINE__);
+        setDISValue(VALUE_ENABLE);
+    }
+
+    rc = commitSetBatch();
+    if (rc != NO_ERROR) {
+        ALOGE("%s:Failed to update recording hint", __func__);
+        return rc;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setHistogram
+ *
+ * DESCRIPTION: set histogram
+ *
+ * PARAMETERS :
+ *   @enabled : if histogram is enabled
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setHistogram(bool enabled)
+{
+    if(m_bHistogramEnabled == enabled) {
+        CDBG_HIGH("%s: histogram flag not changed, no ops here", __func__);
+        return NO_ERROR;
+    }
+
+    // set parm for histogram
+    if(initBatchUpdate(m_pParamBuf) < 0 ) {
+        ALOGE("%s:Failed to initialize group update table", __func__);
+        return BAD_TYPE;
+    }
+
+    int32_t value = enabled ? 1 : 0;
+    int32_t rc = NO_ERROR;
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_HISTOGRAM, value)) {
+        ALOGE("%s:Failed to update table", __func__);
+        return BAD_VALUE;
+    }
+
+    rc = commitSetBatch();
+    if (rc != NO_ERROR) {
+        ALOGE("%s:Failed to set histogram", __func__);
+        return rc;
+    }
+
+    m_bHistogramEnabled = enabled;
+
+    CDBG_HIGH(" Histogram -> %s", m_bHistogramEnabled ? "Enabled" : "Disabled");
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setIntEvent
+ *
+ * DESCRIPTION: send internal event parameters to backend
+ *
+ * PARAMETERS :
+ *   @params : internal event parameters (image size and dimensions)
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setIntEvent(cam_int_evt_params_t params)
+{
+    int32_t rc = NO_ERROR;
+
+    if ( m_pParamBuf == NULL ) {
+        return NO_INIT;
+    }
+
+    if(initBatchUpdate(m_pParamBuf) < 0 ) {
+        ALOGE("%s:Failed to initialize group update table", __func__);
+        return BAD_TYPE;
+    }
+
+    //Sending snapshot taken notification back to Eztune
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_INT_EVT, params)) {
+        ALOGE("%s:Failed to update table", __func__);
+        return BAD_VALUE;
+    }
+
+    rc = commitSetBatch();
+    if (rc != NO_ERROR) {
+        ALOGE("%s:Failed to set frameskip info parm", __func__);
+        return rc;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setFaceDetectionOption
+ *
+ * DESCRIPTION: set if face detection is enabled by SendCommand
+ *
+ * PARAMETERS :
+ *   @enabled : bool flag if face detection should be enabled
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+ int32_t QCameraParameters::setFaceDetectionOption(bool enabled)
+{
+    m_bFaceDetectionOn = enabled;
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setFaceDetection
+ *
+ * DESCRIPTION: set face detection
+ *
+ * PARAMETERS :
+ *   @enabled : if face detection is enabled
+ *   @initCommit : if configuration list needs to be initialized and committed
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setFaceDetection(bool enabled, bool initCommit)
+{
+    uint32_t faceProcMask = m_nFaceProcMask;
+    // set face detection mask
+    if (enabled) {
+        faceProcMask |= CAM_FACE_PROCESS_MASK_DETECTION;
+    } else {
+        faceProcMask &= ~CAM_FACE_PROCESS_MASK_DETECTION;
+    }
+
+    if(m_nFaceProcMask == faceProcMask) {
+        CDBG_HIGH("%s: face process mask not changed, no ops here", __func__);
+        return NO_ERROR;
+    }
+
+    m_nFaceProcMask = faceProcMask;
+
+    // set parm for face detection
+    uint32_t requested_faces = (uint32_t)getInt(KEY_QC_MAX_NUM_REQUESTED_FACES);
+    cam_fd_set_parm_t fd_set_parm;
+    memset(&fd_set_parm, 0, sizeof(cam_fd_set_parm_t));
+    fd_set_parm.fd_mode = faceProcMask;
+    fd_set_parm.num_fd = requested_faces;
+
+    CDBG_HIGH("[KPI Perf] %s: PROFILE_FACE_DETECTION_VALUE = %d num_fd = %d",
+          __func__, faceProcMask,requested_faces);
+
+    if (initCommit) {
+        if(initBatchUpdate(m_pParamBuf) < 0 ) {
+            ALOGE("%s:Failed to initialize group update table", __func__);
+            return BAD_TYPE;
+        }
+    }
+
+    int32_t rc = NO_ERROR;
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_FD, fd_set_parm)) {
+        ALOGE("%s:Failed to update table", __func__);
+        return BAD_VALUE;
+    }
+
+    if (initCommit) {
+        rc = commitSetBatch();
+        if (rc != NO_ERROR) {
+            ALOGE("%s:Failed to set face detection parm", __func__);
+            return rc;
+        }
+    }
+
+    CDBG_HIGH("%s: FaceProcMask -> %d", __func__, m_nFaceProcMask);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setFrameSkip
+ *
+ * DESCRIPTION: send ISP frame skip pattern to camera daemon
+ *
+ * PARAMETERS :
+ *   @pattern : skip pattern for ISP
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setFrameSkip(enum msm_vfe_frame_skip_pattern pattern)
+{
+    int32_t rc = NO_ERROR;
+
+    if ( m_pParamBuf == NULL ) {
+        return NO_INIT;
+    }
+
+    if(initBatchUpdate(m_pParamBuf) < 0 ) {
+        ALOGE("%s:Failed to initialize group update table", __func__);
+        return BAD_TYPE;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_FRAMESKIP, (int32_t)pattern)) {
+        ALOGE("%s:Failed to update table", __func__);
+        return BAD_VALUE;
+    }
+
+    rc = commitSetBatch();
+    if (rc != NO_ERROR) {
+        ALOGE("%s:Failed to set frameskip info parm", __func__);
+        return rc;
+    }
+
+    return rc;
+}
+
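+/*===========================================================================
+ * FUNCTION   : updateRAW
+ *
+ * DESCRIPTION: query the backend for the RAW dimension that matches the given
+ *              maximum stream dimension and update the local RAW size
+ *
+ * PARAMETERS :
+ *   @max_dim : maximum dimension of the configured streams
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/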
+int32_t QCameraParameters::updateRAW(cam_dimension_t max_dim)
+{
+    int32_t rc = NO_ERROR;
+    cam_dimension_t raw_dim;
+
+    if (max_dim.width == 0 || max_dim.height == 0) {
+        max_dim = m_pCapability->raw_dim[0];
+    }
+
+    if(initBatchUpdate(m_pParamBuf) < 0 ) {
+        ALOGE("%s:Failed to initialize group update table", __func__);
+        return BAD_TYPE;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_MAX_DIMENSION, max_dim)) {
+        ALOGE("%s:Failed to update table for CAM_INTF_PARM_MAX_DIMENSION ", __func__);
+        return BAD_VALUE;
+    }
+
+    rc = commitSetBatch();
+    if (rc != NO_ERROR) {
+        ALOGE("%s:Failed to set lock CAM_INTF_PARM_MAX_DIMENSION parm", __func__);
+        return rc;
+    }
+
+    if(initBatchUpdate(m_pParamBuf) < 0 ) {
+        ALOGE("%s:Failed to initialize group update table", __func__);
+        return BAD_TYPE;
+    }
+
+    ADD_GET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_RAW_DIMENSION);
+
+    rc = commitGetBatch();
+    if (rc != NO_ERROR) {
+        ALOGE("%s:Failed to get commit CAM_INTF_PARM_RAW_DIMENSION", __func__);
+        return rc;
+    }
+
+    READ_PARAM_ENTRY(m_pParamBuf, CAM_INTF_PARM_RAW_DIMENSION, raw_dim);
+
+    CDBG_HIGH("%s : RAW Dimension = %d X %d",__func__,raw_dim.width,raw_dim.height);
+    if (raw_dim.width == 0 || raw_dim.height == 0) {
+        ALOGE("%s: Error getting RAW size. Setting to Capability value",__func__);
+        raw_dim = m_pCapability->raw_dim[0];
+    }
+    setRawSize(raw_dim);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setHDRSceneEnable
+ *
+ * DESCRIPTION: sets HDR scene detected flag
+ *
+ * PARAMETERS :
+ *   @bflag : HDR scene detected
+ *
+ * RETURN     : nothing
+ *==========================================================================*/
+void QCameraParameters::setHDRSceneEnable(bool bflag)
+{
+    bool bupdate = false;
+    if (m_HDRSceneEnabled != bflag) {
+        bupdate = true;
+    }
+    m_HDRSceneEnabled = bflag;
+
+    if (bupdate) {
+        updateFlash(true);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getASDStateString
+ *
+ * DESCRIPTION: get ASD result in string format
+ *
+ * PARAMETERS :
+ *   @scene : selected scene mode
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+ const char *QCameraParameters::getASDStateString(cam_auto_scene_t scene)
+{
+    switch (scene) {
+      case S_NORMAL :
+        return "Normal";
+      case S_SCENERY:
+        return "Scenery";
+      case S_PORTRAIT:
+        return "Portrait";
+      case S_PORTRAIT_BACKLIGHT:
+        return "Portrait-Backlight";
+      case S_SCENERY_BACKLIGHT:
+        return "Scenery-Backlight";
+      case S_BACKLIGHT:
+        return "Backlight";
+      default:
+        return "<Unknown!>";
+      }
+}
+
+/*===========================================================================
+ * FUNCTION   : parseNDimVector
+ *
+ * DESCRIPTION: helper function to parse a string like "(1, 2, 3, 4, ..., N)"
+ *              into N-dimension vector
+ *
+ * PARAMETERS :
+ *   @str     : string to be parsed
+ *   @num     : output array of size N to store vector element values
+ *   @N       : number of dimension
+ *   @delim   : delimiter to separate the string
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::parseNDimVector(const char *str, int *num, int N, char delim = ',')
+{
+    char *start, *end;
+    if (num == NULL) {
+        ALOGE("%s: Invalid output array (num == NULL)", __func__);
+        return BAD_VALUE;
+    }
+
+    //check if string starts and ends with parentheses
+    if(str[0] != '(' || str[strlen(str)-1] != ')') {
+        ALOGE("%s: Invalid format of string %s, valid format is (n1, n2, n3, n4 ...)",
+              __func__, str);
+        return BAD_VALUE;
+    }
+    start = (char*) str;
+    start++;
+    for(int i=0; i<N; i++) {
+        *(num+i) = (int) strtol(start, &end, 10);
+        if(*end != delim && i < N-1) {
+            ALOGE("%s: Cannot find delimeter '%c' in string \"%s\". end = %c",
+                  __func__, delim, str, *end);
+            return -1;
+        }
+        start = end+1;
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : parseCameraAreaString
+ *
+ * DESCRIPTION: helper function to parse a string of camera areas like
+ *              "(1, 2, 3, 4, 5),(1, 2, 3, 4, 5),..."
+ *
+ * PARAMETERS :
+ *   @str             : string to be parsed
+ *   @max_num_areas   : max number of areas
+ *   @pAreas          : ptr to struct to store areas
+ *   @num_areas_found : number of areas found
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::parseCameraAreaString(const char *str,
+                                                 int max_num_areas,
+                                                 cam_area_t *pAreas,
+                                                 int& num_areas_found)
+{
+    char area_str[32];
+    const char *start, *end, *p;
+    start = str; end = NULL;
+    int values[5], index=0;
+    num_areas_found = 0;
+
+    memset(values, 0, sizeof(values));
+    while(start != NULL) {
+       if(*start != '(') {
+            ALOGE("%s: error: Ill formatted area string: %s", __func__, str);
+            return BAD_VALUE;
+       }
+       end = strchr(start, ')');
+       if(end == NULL) {
+            ALOGE("%s: error: Ill formatted area string: %s", __func__, str);
+            return BAD_VALUE;
+       }
+       int i;
+       for (i=0,p=start; p<=end; p++, i++) {
+           area_str[i] = *p;
+       }
+       area_str[i] = '\0';
+       if(parseNDimVector(area_str, values, 5) < 0){
+            ALOGE("%s: error: Failed to parse the area string: %s", __func__, area_str);
+            return BAD_VALUE;
+       }
+       // no more areas than max_num_areas are accepted.
+       if(index >= max_num_areas) {
+            ALOGE("%s: error: too many areas specified %s", __func__, str);
+            return BAD_VALUE;
+       }
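+       // Area string format is (left, top, right, bottom, weight); convert the
+       // right/bottom coordinates into width/height for cam_area_t.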
+       pAreas[index].rect.left = values[0];
+       pAreas[index].rect.top = values[1];
+       pAreas[index].rect.width = values[2] - values[0];
+       pAreas[index].rect.height = values[3] - values[1];
+       pAreas[index].weight = values[4];
+
+       index++;
+       start = strchr(end, '('); // search for next '('
+    }
+    num_areas_found = index;
+    return 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : validateCameraAreas
+ *
+ * DESCRIPTION: helper function to validate camera areas within (-1000, 1000)
+ *
+ * PARAMETERS :
+ *   @areas     : ptr to array of areas
+ *   @num_areas : number of areas
+ *
+ * RETURN     : true --  area is in valid range
+ *              false -- not valid
+ *==========================================================================*/
+bool QCameraParameters::validateCameraAreas(cam_area_t *areas, int num_areas)
+{
+    // special case: default area
+    if (num_areas == 1 &&
+        areas[0].rect.left == 0 &&
+        areas[0].rect.top == 0 &&
+        areas[0].rect.width == 0 &&
+        areas[0].rect.height == 0 &&
+        areas[0].weight == 0) {
+        return true;
+    }
+
+    for(int i = 0; i < num_areas; i++) {
+        // left should be >= -1000
+        if(areas[i].rect.left < -1000) {
+            return false;
+        }
+
+        // top  should be >= -1000
+        if(areas[i].rect.top < -1000) {
+            return false;
+        }
+
+        // width or height should be > 0
+        if (areas[i].rect.width <= 0 || areas[i].rect.height <= 0) {
+            return false;
+        }
+
+        // right  should be <= 1000
+        if(areas[i].rect.left + areas[i].rect.width > 1000) {
+            return false;
+        }
+
+        // bottom should be <= 1000
+        if(areas[i].rect.top + areas[i].rect.height > 1000) {
+            return false;
+        }
+
+        // weight should be within (1, 1000)
+        if (areas[i].weight < 1 || areas[i].weight > 1000) {
+            return false;
+        }
+    }
+    return true;
+}
+
+/*===========================================================================
+ * FUNCTION   : isYUVFrameInfoNeeded
+ *
+ * DESCRIPTION: In AE-Bracket mode, YUV buffer information needs to be set for the upper layer
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : true: needed
+ *              false: no need
+ *==========================================================================*/
+bool QCameraParameters::isYUVFrameInfoNeeded()
+{
+    //In AE-Bracket mode, YUV buffer information needs to be set for the upper layer
+    if(!isNV21PictureFormat() && !isNV16PictureFormat()){
+        return false;
+    }
+    const char *aecBracketStr =  get(KEY_QC_AE_BRACKET_HDR);
+
+    int value = lookupAttr(BRACKETING_MODES_MAP, PARAM_MAP_SIZE(BRACKETING_MODES_MAP),
+            aecBracketStr);
+    CDBG_HIGH("%s: aecBracketStr=%s, value=%d.", __func__, aecBracketStr, value);
+    return (value == CAM_EXP_BRACKETING_ON);
+}
+
+/*===========================================================================
+ * FUNCTION   : getFrameFmtString
+ *
+ * DESCRIPTION: get string name of frame format
+ *
+ * PARAMETERS :
+ *   @fmt     : frame format
+ *
+ * RETURN     : string name of frame format
+ *==========================================================================*/
+const char *QCameraParameters::getFrameFmtString(cam_format_t fmt)
+{
+    return lookupNameByValue(PICTURE_TYPES_MAP, PARAM_MAP_SIZE(PICTURE_TYPES_MAP), fmt);
+}
+
+/*===========================================================================
+ * FUNCTION   : initBatchUpdate
+ *
+ * DESCRIPTION: init camera parameters buf entries
+ *
+ * PARAMETERS :
+ *   @p_table : ptr to parameter buffer
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::initBatchUpdate(parm_buffer_t *p_table)
+{
+    m_tempMap.clear();
+
+    clear_metadata_buffer(p_table);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : commitSetBatch
+ *
+ * DESCRIPTION: commit all set parameters in the batch work to backend
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::commitSetBatch()
+{
+    int32_t rc = NO_ERROR;
+    int32_t i = 0;
+
+    if (NULL == m_pParamBuf) {
+        ALOGE("%s: Params not initialized", __func__);
+        return NO_INIT;
+    }
+
+    /* Loop to check if at least one entry is valid */
+    for(i = 0; i < CAM_INTF_PARM_MAX; i++){
+        if(m_pParamBuf->is_valid[i])
+            break;
+    }
+
+    if (NULL == m_pCamOpsTbl) {
+        ALOGE("%s: Ops not initialized", __func__);
+        return NO_INIT;
+    }
+
+    if (i < CAM_INTF_PARM_MAX) {
+        rc = m_pCamOpsTbl->ops->set_parms(m_pCamOpsTbl->camera_handle, m_pParamBuf);
+    }
+    if (rc == NO_ERROR) {
+        // commit change from temp storage into param map
+        rc = commitParamChanges();
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : commitGetBatch
+ *
+ * DESCRIPTION: commit all get parameters in the batch work to backend
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::commitGetBatch()
+{
+    int32_t rc = NO_ERROR;
+    int32_t i = 0;
+
+    if (NULL == m_pParamBuf) {
+        ALOGE("%s: Params not initialized", __func__);
+        return NO_INIT;
+    }
+
+    /* Loop to check if at least one entry is valid */
+    for(i = 0; i < CAM_INTF_PARM_MAX; i++){
+        if(m_pParamBuf->is_valid[i])
+            break;
+    }
+
+    if (NULL == m_pCamOpsTbl) {
+        ALOGE("%s: Ops not initialized", __func__);
+        return NO_INIT;
+    }
+
+    if (i < CAM_INTF_PARM_MAX) {
+        rc = m_pCamOpsTbl->ops->get_parms(m_pCamOpsTbl->camera_handle, m_pParamBuf);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : updateParamEntry
+ *
+ * DESCRIPTION: update a parameter entry in the local temp map obj
+ *
+ * PARAMETERS :
+ *   @key     : key of the entry
+ *   @value   : value of the entry
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::updateParamEntry(const char *key, const char *value)
+{
+    m_tempMap.replaceValueFor(String8(key), String8(value));
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : commitParamChanges
+ *
+ * DESCRIPTION: commit all changes in local temp map obj into parameter obj
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::commitParamChanges()
+{
+    size_t size = m_tempMap.size();
+    for (size_t i = 0; i < size; i++) {
+        String8 k, v;
+        k = m_tempMap.keyAt(i);
+        v = m_tempMap.valueAt(i);
+        set(k, v);
+    }
+    m_tempMap.clear();
+
+    // update local changes
+    m_bRecordingHint = m_bRecordingHint_new;
+    m_bZslMode = m_bZslMode_new;
+
+    /* After applying scene mode auto,
+      Camera effects need to be reapplied */
+    if ( m_bSceneTransitionAuto ) {
+        m_bUpdateEffects = true;
+        m_bSceneTransitionAuto = false;
+    }
+
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : QCameraReprocScaleParam
+ *
+ * DESCRIPTION: constructor of QCameraReprocScaleParam
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraReprocScaleParam::QCameraReprocScaleParam(QCameraParameters *parent)
+  : mParent(parent),
+    mScaleEnabled(false),
+    mIsUnderScaling(false),
+    mScaleDirection(0),
+    mNeedScaleCnt(0),
+    mSensorSizeTblCnt(0),
+    mSensorSizeTbl(NULL),
+    mTotalSizeTblCnt(0)
+{
+    mPicSizeFromAPK.width = 0;
+    mPicSizeFromAPK.height = 0;
+    mPicSizeSetted.width = 0;
+    mPicSizeSetted.height = 0;
+    memset(mNeedScaledSizeTbl, 0, sizeof(mNeedScaledSizeTbl));
+    memset(mTotalSizeTbl, 0, sizeof(mTotalSizeTbl));
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCameraReprocScaleParam
+ *
+ * DESCRIPTION: destructor of QCameraReprocScaleParam
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraReprocScaleParam::~QCameraReprocScaleParam()
+{
+    //do nothing now.
+}
+
+/*===========================================================================
+ * FUNCTION   : setScaleSizeTbl
+ *
+ * DESCRIPTION: re-set the picture size table with dimensions that need scaling, if Reproc Scale is enabled
+ *
+ * PARAMETERS :
+ *   @scale_cnt   : count of picture sizes that need scaling
+ *   @scale_tbl   : picture size table that needs scaling
+ *   @org_cnt     : sensor supported picture size count
+ *   @org_tbl     : sensor supported picture size table
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraReprocScaleParam::setScaleSizeTbl(size_t scale_cnt,
+        cam_dimension_t *scale_tbl, size_t org_cnt, cam_dimension_t *org_tbl)
+{
+    int32_t rc = NO_ERROR;
+    size_t i;
+    mNeedScaleCnt = 0;
+
+    if(!mScaleEnabled || scale_cnt <=0 || scale_tbl == NULL || org_cnt <=0 || org_tbl == NULL){
+        return BAD_VALUE;    // No scaling needed, so there is no need to reset the picture size table
+    }
+
+    mSensorSizeTblCnt = org_cnt;
+    mSensorSizeTbl = org_tbl;
+    mNeedScaleCnt = checkScaleSizeTable(scale_cnt, scale_tbl, org_cnt, org_tbl);
+    if(mNeedScaleCnt <= 0){
+        ALOGE("%s: do not have picture sizes need scaling.", __func__);
+        return BAD_VALUE;
+    }
+
+    if(mNeedScaleCnt + org_cnt > MAX_SIZES_CNT){
+        ALOGE("%s: picture size list exceed the max count.", __func__);
+        return BAD_VALUE;
+    }
+
+    //get the total picture size table
+    mTotalSizeTblCnt = mNeedScaleCnt + org_cnt;
+
+    if (mNeedScaleCnt > MAX_SCALE_SIZES_CNT) {
+        ALOGE("%s: Error!! mNeedScaleCnt (%d) is more than MAX_SCALE_SIZES_CNT",
+                __func__, mNeedScaleCnt);
+        return BAD_VALUE;
+    }
+
+    for(i = 0; i < mNeedScaleCnt; i++){
+        mTotalSizeTbl[i].width = mNeedScaledSizeTbl[i].width;
+        mTotalSizeTbl[i].height = mNeedScaledSizeTbl[i].height;
+        CDBG_HIGH("%s: scale picture size: i =%d, width=%d, height=%d.", __func__,
+            i, mTotalSizeTbl[i].width, mTotalSizeTbl[i].height);
+    }
+    for(; i < mTotalSizeTblCnt; i++){
+        mTotalSizeTbl[i].width = org_tbl[i-mNeedScaleCnt].width;
+        mTotalSizeTbl[i].height = org_tbl[i-mNeedScaleCnt].height;
+        CDBG_HIGH("%s: sensor supportted picture size: i =%d, width=%d, height=%d.", __func__,
+            i, mTotalSizeTbl[i].width, mTotalSizeTbl[i].height);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : getScaleSizeTblCnt
+ *
+ * DESCRIPTION: get count of picture sizes that need scaling
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : size_t count of picture sizes that need scaling
+ *==========================================================================*/
+size_t QCameraReprocScaleParam::getScaleSizeTblCnt()
+{
+    return mNeedScaleCnt;
+}
+
+/*===========================================================================
+ * FUNCTION   : getScaledSizeTbl
+ *
+ * DESCRIPTION: get picture size table that needs scaling
+ *
+ * PARAMETERS :  none
+ *
+ * RETURN     : cam_dimension_t list of picture size table
+ *==========================================================================*/
+cam_dimension_t *QCameraReprocScaleParam::getScaledSizeTbl()
+{
+    if(!mScaleEnabled)
+        return NULL;
+
+    return mNeedScaledSizeTbl;
+}
+
+/*===========================================================================
+ * FUNCTION   : setScaleEnable
+ *
+ * DESCRIPTION: enable or disable Reproc Scale
+ *
+ * PARAMETERS :
+ *   @enabled : enable: 1; disable 0
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraReprocScaleParam::setScaleEnable(bool enabled)
+{
+    mScaleEnabled = enabled;
+}
+
+/*===========================================================================
+ * FUNCTION   : isScaleEnabled
+ *
+ * DESCRIPTION: check if Reproc Scale is enabled
+ *
+ * PARAMETERS :  none
+ *
+ * RETURN     : bool type of status
+ *==========================================================================*/
+bool QCameraReprocScaleParam::isScaleEnabled()
+{
+    return mScaleEnabled;
+}
+
+/*===========================================================================
+ * FUNCTION   : isScalePicSize
+ *
+ * DESCRIPTION: check if current picture size is from Scale Table
+ *
+ * PARAMETERS :
+ *   @width     : current picture width
+ *   @height    : current picture height
+ *
+ * RETURN     : bool type of status
+ *==========================================================================*/
+bool QCameraReprocScaleParam::isScalePicSize(int width, int height)
+{
+    //Check if the picture size is in scale table
+    if(mNeedScaleCnt <= 0)
+        return FALSE;
+
+    for (size_t i = 0; i < mNeedScaleCnt; i++) {
+        if ((mNeedScaledSizeTbl[i].width == width) && (mNeedScaledSizeTbl[i].height == height)) {
+            //found match
+            return TRUE;
+        }
+    }
+
+    ALOGE("%s: Not in scale picture size table.", __func__);
+    return FALSE;
+}
+
+/*===========================================================================
+ * FUNCTION   : isValidatePicSize
+ *
+ * DESCRIPTION: check if current picture size is valid
+ *
+ * PARAMETERS :
+ *   @width     : current picture width
+ *   @height    : current picture height
+ *
+ * RETURN     : bool type of status
+ *==========================================================================*/
+bool QCameraReprocScaleParam::isValidatePicSize(int width, int height)
+{
+    size_t i = 0;
+
+    for(i = 0; i < mSensorSizeTblCnt; i++){
+        if(mSensorSizeTbl[i].width == width
+            && mSensorSizeTbl[i].height== height){
+            return TRUE;
+        }
+    }
+
+    for(i = 0; i < mNeedScaleCnt; i++){
+        if(mNeedScaledSizeTbl[i].width == width
+            && mNeedScaledSizeTbl[i].height== height){
+            return TRUE;
+        }
+    }
+
+    ALOGE("%s: Invalidate input picture size.", __func__);
+    return FALSE;
+}
+
+/*===========================================================================
+ * FUNCTION   : setSensorSupportedPicSize
+ *
+ * DESCRIPTION: set sensor supported picture size.
+ *    For Snapshot stream size configuration, we need to use a sensor supported size.
+ *    We will use the CPP to do scaling based on the output snapshot stream.
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraReprocScaleParam::setSensorSupportedPicSize()
+{
+    //Find a suitable picture size (we leave the possibility of adding other scaling requirements here).
+    //Currently we only focus on upscaling, and checkScaleSizeTable() has guaranteed the dimension ratio.
+
+    if(!mIsUnderScaling || mSensorSizeTblCnt <= 0)
+        return BAD_VALUE;
+
+    //We just get the max sensor supported size here.
+    mPicSizeSetted.width = mSensorSizeTbl[0].width;
+    mPicSizeSetted.height = mSensorSizeTbl[0].height;
+
+    return NO_ERROR;
+}
+
+
+/*===========================================================================
+ * FUNCTION   : setValidatePicSize
+ *
+ * DESCRIPTION: set sensor supported size and change scale status.
+ *
+ * PARAMETERS :
+ *   @width    : input picture width
+ *   @height   : input picture height
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraReprocScaleParam::setValidatePicSize(int &width,int &height)
+{
+    if(!mScaleEnabled)
+        return BAD_VALUE;
+
+    mIsUnderScaling = FALSE; //default: not under scale
+
+    if(isScalePicSize(width, height)){
+        // input picture size needs scaling; record the size from the APK and the size actually set
+        mIsUnderScaling = TRUE;
+        mPicSizeFromAPK.width = width;
+        mPicSizeFromAPK.height = height;
+
+        if(setSensorSupportedPicSize() != NO_ERROR)
+            return BAD_VALUE;
+
+        //re-set picture size to sensor supported size
+        width = mPicSizeSetted.width;
+        height = mPicSizeSetted.height;
+        CDBG_HIGH("%s: mPicSizeFromAPK- with=%d, height=%d, mPicSizeSetted- with =%d, height=%d.",
+            __func__, mPicSizeFromAPK.width, mPicSizeFromAPK.height, mPicSizeSetted.width, mPicSizeSetted.height);
+    }else{
+        mIsUnderScaling = FALSE;
+        //no scale is needed for input picture size
+        if(!isValidatePicSize(width, height)){
+            ALOGE("%s: invalidate input picture size.", __func__);
+            return BAD_VALUE;
+        }
+        mPicSizeSetted.width = width;
+        mPicSizeSetted.height = height;
+    }
+
+    CDBG_HIGH("%s: X. mIsUnderScaling=%d, width=%d, height=%d.", __func__, mIsUnderScaling, width, height);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : getPicSizeFromAPK
+ *
+ * DESCRIPTION: get picture size received from the APK
+ *
+ * PARAMETERS :
+ *   @width     : input width
+ *   @height    : input height
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraReprocScaleParam::getPicSizeFromAPK(int &width, int &height)
+{
+    if(!mIsUnderScaling)
+        return BAD_VALUE;
+
+    width = mPicSizeFromAPK.width;
+    height = mPicSizeFromAPK.height;
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : getPicSizeSetted
+ *
+ * DESCRIPTION: get picture size set in mm-camera
+ *
+ * PARAMETERS :
+ *   @width     : input width
+ *   @height    : input height
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraReprocScaleParam::getPicSizeSetted(int &width, int &height)
+{
+    width = mPicSizeSetted.width;
+    height = mPicSizeSetted.height;
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : isUnderScaling
+ *
+ * DESCRIPTION: check whether Reproc Scale is currently in use
+ *
+ * PARAMETERS :  none
+ *
+ * RETURN     : bool type of status
+ *==========================================================================*/
+bool QCameraReprocScaleParam::isUnderScaling()
+{
+    return mIsUnderScaling;
+}
+
+/*===========================================================================
+ * FUNCTION   : checkScaleSizeTable
+ *
+ * DESCRIPTION: check the requested scale size table (PICTURE_SIZE_NEED_SCALE) and keep only entries valid for scaling
+ *
+ * PARAMETERS :
+ *   @scale_cnt   : count of picture sizes that need scaling
+ *   @scale_tbl   : picture size table that needs scaling
+ *   @org_cnt     : sensor supported picture size count
+ *   @org_tbl     : sensor supported picture size table
+ *
+ * RETURN     : size_t count of valid scale sizes kept in the table
+ *==========================================================================*/
+size_t QCameraReprocScaleParam::checkScaleSizeTable(size_t scale_cnt,
+        cam_dimension_t *scale_tbl, size_t org_cnt, cam_dimension_t *org_tbl)
+{
+    size_t stbl_cnt = 0;
+    size_t temp_cnt = 0;
+    ssize_t i = 0;
+    if(scale_cnt <=0 || scale_tbl == NULL || org_tbl == NULL || org_cnt <= 0)
+        return stbl_cnt;
+
+    //Build the valid scale size table. Currently we only support:
+    // 1. Upscaling: the scale size must be larger than the max sensor supported size.
+    // 2. The scale dimension ratio must match that of the max sensor supported size.
+    temp_cnt = scale_cnt;
+    for (i = (ssize_t)(scale_cnt - 1); i >= 0; i--) {
+        if (scale_tbl[i].width > org_tbl[0].width ||
+                (scale_tbl[i].width == org_tbl[0].width &&
+                    scale_tbl[i].height > org_tbl[0].height)) {
+            //get the smallest scale size
+            break;
+        }
+        temp_cnt--;
+    }
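+    // Entries [0, temp_cnt) are the candidates larger than the max sensor supported
+    // size (the scale table is assumed to be sorted in descending order).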
+
+    //check dimension ratio
+    double supported_ratio = (double)org_tbl[0].width / (double)org_tbl[0].height;
+    for (i = 0; i < (ssize_t)temp_cnt; i++) {
+        double cur_ratio = (double)scale_tbl[i].width / (double)scale_tbl[i].height;
+        if (fabs(supported_ratio - cur_ratio) > ASPECT_TOLERANCE) {
+            continue;
+        }
+        mNeedScaledSizeTbl[stbl_cnt].width = scale_tbl[i].width;
+        mNeedScaledSizeTbl[stbl_cnt].height= scale_tbl[i].height;
+        stbl_cnt++;
+    }
+
+    return stbl_cnt;
+}
+
+/*===========================================================================
+ * FUNCTION   : getTotalSizeTblCnt
+ *
+ * DESCRIPTION: get total picture size count after adding dimensions that need scaling
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : size_t type of picture size count
+ *==========================================================================*/
+size_t QCameraReprocScaleParam::getTotalSizeTblCnt()
+{
+    return mTotalSizeTblCnt;
+}
+
+/*===========================================================================
+ * FUNCTION   : getTotalSizeTbl
+ *
+ * DESCRIPTION: get picture size table after adding dimensions that need scaling
+ *
+ * PARAMETERS :  none
+ *
+ * RETURN     : cam_dimension_t list of picture size table
+ *==========================================================================*/
+cam_dimension_t *QCameraReprocScaleParam::getTotalSizeTbl()
+{
+    if(!mScaleEnabled)
+        return NULL;
+
+    return mTotalSizeTbl;
+}
+
+/*===========================================================================
+ * FUNCTION   : isHDREnabled
+ *
+ * DESCRIPTION: if HDR is enabled
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : true: needed
+ *              false: no need
+ *==========================================================================*/
+bool QCameraParameters::isHDREnabled()
+{
+    return ((m_nBurstNum == 1) && (m_bHDREnabled || m_HDRSceneEnabled));
+}
+
+/*===========================================================================
+ * FUNCTION   : isAVTimerEnabled
+ *
+ * DESCRIPTION: if AVTimer is enabled
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : true: needed
+ *              false: no need
+ *==========================================================================*/
+bool QCameraParameters::isAVTimerEnabled()
+{
+    return m_bAVTimerEnabled;
+}
+
+/*===========================================================================
+ * FUNCTION   : isDISEnabled
+ *
+ * DESCRIPTION: if DIS is enabled
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : true: DIS is enabled
+ *              false: DIS is disabled
+ *==========================================================================*/
+bool QCameraParameters::isDISEnabled()
+{
+    return m_bDISEnabled;
+}
+
+/*===========================================================================
+ * FUNCTION   : getISType
+ *
+ * DESCRIPTION: returns IS type
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : IS type
+ *
+ *==========================================================================*/
+cam_is_type_t QCameraParameters::getISType()
+{
+    return mIsType;
+}
+
+/*===========================================================================
+ * FUNCTION   : getMobicatMask
+ *
+ * DESCRIPTION: returns mobicat mask
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : mobicat mask
+ *
+ *==========================================================================*/
+uint8_t QCameraParameters::getMobicatMask()
+{
+    return m_MobiMask;
+}
+
+/*===========================================================================
+ * FUNCTION   : sendStreamConfigInfo
+ *
+ * DESCRIPTION: send Stream config info.
+ *
+ * PARAMETERS :
+ *   @stream_config_info: Stream config information
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+bool QCameraParameters::sendStreamConfigInfo(cam_stream_size_info_t &stream_config_info) {
+    int32_t rc = NO_ERROR;
+    if(initBatchUpdate(m_pParamBuf) < 0 ) {
+        ALOGE("%s:Failed to initialize group update table", __func__);
+        return BAD_TYPE;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf,
+            CAM_INTF_META_STREAM_INFO, stream_config_info)) {
+        ALOGE("%s:Failed to update table", __func__);
+        return BAD_VALUE;
+    }
+
+    rc = commitSetBatch();
+    if (rc != NO_ERROR) {
+        ALOGE("%s:Failed to set stream info parm", __func__);
+        return rc;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setStreamConfigure
+ *
+ * DESCRIPTION: set stream type, stream dimension for all configured streams.
+ *
+ * PARAMETERS :
+ *   @isCapture: Whether this configuration is for an image capture
+ *   @previewAsPostview: Use preview as postview
+ *   @resetConfig: Reset (clear) the current stream configuration
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+bool QCameraParameters::setStreamConfigure(bool isCapture,
+        bool previewAsPostview, bool resetConfig) {
+
+    int32_t rc = NO_ERROR;
+    cam_stream_size_info_t stream_config_info;
+    char value[PROPERTY_VALUE_MAX];
+    bool raw_yuv = false;
+    bool raw_capture = false;
+
+    if ( m_pParamBuf == NULL ) {
+        return NO_INIT;
+    }
+
+    memset(&stream_config_info, 0, sizeof(stream_config_info));
+    stream_config_info.num_streams = 0;
+
+    if (m_bStreamsConfigured) {
+        CDBG_HIGH("%s: Reset stream config!!", __func__);
+        rc = sendStreamConfigInfo(stream_config_info);
+        m_bStreamsConfigured = false;
+    }
+    if (resetConfig) {
+        CDBG_HIGH("%s: Done Resetting stream config!!", __func__);
+        return rc;
+    }
+
+    property_get("persist.camera.raw_yuv", value, "0");
+    raw_yuv = atoi(value) > 0 ? true : false;
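+    // With persist.camera.raw_yuv set, an extra RAW stream is appended near
+    // the end of this function for ZSL and offline-RAW capture use cases.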
+
+    if (isZSLMode() && getRecordingHintValue() != true) {
+        stream_config_info.type[stream_config_info.num_streams] =
+            CAM_STREAM_TYPE_PREVIEW;
+        getStreamDimension(CAM_STREAM_TYPE_PREVIEW,
+                stream_config_info.stream_sizes[stream_config_info.num_streams]);
+        updatePpFeatureMask(CAM_STREAM_TYPE_PREVIEW);
+        stream_config_info.postprocess_mask[stream_config_info.num_streams] =
+                mStreamPpMask[CAM_STREAM_TYPE_PREVIEW];
+        stream_config_info.num_streams++;
+
+        stream_config_info.type[stream_config_info.num_streams] =
+                CAM_STREAM_TYPE_ANALYSIS;
+        getStreamDimension(CAM_STREAM_TYPE_ANALYSIS,
+                stream_config_info.stream_sizes[stream_config_info.num_streams]);
+        updatePpFeatureMask(CAM_STREAM_TYPE_ANALYSIS);
+        stream_config_info.postprocess_mask[stream_config_info.num_streams] =
+                mStreamPpMask[CAM_STREAM_TYPE_ANALYSIS];
+        stream_config_info.num_streams++;
+
+        stream_config_info.type[stream_config_info.num_streams] =
+                CAM_STREAM_TYPE_SNAPSHOT;
+        getStreamDimension(CAM_STREAM_TYPE_SNAPSHOT,
+                stream_config_info.stream_sizes[stream_config_info.num_streams]);
+        updatePpFeatureMask(CAM_STREAM_TYPE_SNAPSHOT);
+        stream_config_info.postprocess_mask[stream_config_info.num_streams] =
+                mStreamPpMask[CAM_STREAM_TYPE_SNAPSHOT];
+        stream_config_info.num_streams++;
+
+    } else if (!isCapture) {
+        if (m_bRecordingHint) {
+            if (m_bDISEnabled) {
+                char value[PROPERTY_VALUE_MAX];
+                // Make default value for IS_TYPE as IS_TYPE_EIS_2_0
+                property_get("persist.camera.is_type", value, "4");
+                mIsType = static_cast<cam_is_type_t>(atoi(value));
+            } else {
+                mIsType = IS_TYPE_NONE;
+            }
+            stream_config_info.is_type = mIsType;
+            stream_config_info.type[stream_config_info.num_streams] =
+                    CAM_STREAM_TYPE_SNAPSHOT;
+            getStreamDimension(CAM_STREAM_TYPE_SNAPSHOT,
+                    stream_config_info.stream_sizes[stream_config_info.num_streams]);
+            updatePpFeatureMask(CAM_STREAM_TYPE_SNAPSHOT);
+            stream_config_info.postprocess_mask[stream_config_info.num_streams] =
+                    mStreamPpMask[CAM_STREAM_TYPE_SNAPSHOT];
+            stream_config_info.num_streams++;
+
+            stream_config_info.type[stream_config_info.num_streams] =
+                    CAM_STREAM_TYPE_VIDEO;
+            getStreamDimension(CAM_STREAM_TYPE_VIDEO,
+                    stream_config_info.stream_sizes[stream_config_info.num_streams]);
+            updatePpFeatureMask(CAM_STREAM_TYPE_VIDEO);
+            stream_config_info.postprocess_mask[stream_config_info.num_streams] =
+                    mStreamPpMask[CAM_STREAM_TYPE_VIDEO];
+            stream_config_info.num_streams++;
+        }
+
+        if (getRecordingHintValue() != true) {
+            /* Analysis stream is not used in the recording use case */
+            stream_config_info.type[stream_config_info.num_streams] =
+                    CAM_STREAM_TYPE_ANALYSIS;
+            getStreamDimension(CAM_STREAM_TYPE_ANALYSIS,
+                    stream_config_info.stream_sizes[stream_config_info.num_streams]);
+            updatePpFeatureMask(CAM_STREAM_TYPE_ANALYSIS);
+            stream_config_info.postprocess_mask[stream_config_info.num_streams] =
+                    mStreamPpMask[CAM_STREAM_TYPE_ANALYSIS];
+            stream_config_info.num_streams++;
+        }
+
+        stream_config_info.type[stream_config_info.num_streams] =
+                CAM_STREAM_TYPE_PREVIEW;
+        getStreamDimension(CAM_STREAM_TYPE_PREVIEW,
+                stream_config_info.stream_sizes[stream_config_info.num_streams]);
+        updatePpFeatureMask(CAM_STREAM_TYPE_PREVIEW);
+        stream_config_info.postprocess_mask[stream_config_info.num_streams] =
+                mStreamPpMask[CAM_STREAM_TYPE_PREVIEW];
+        stream_config_info.num_streams++;
+
+    } else {
+        if (isJpegPictureFormat() || isNV16PictureFormat() || isNV21PictureFormat()) {
+            if (!getofflineRAW()) {
+                stream_config_info.type[stream_config_info.num_streams] =
+                        CAM_STREAM_TYPE_SNAPSHOT;
+                getStreamDimension(CAM_STREAM_TYPE_SNAPSHOT,
+                        stream_config_info.stream_sizes[stream_config_info.num_streams]);
+                updatePpFeatureMask(CAM_STREAM_TYPE_SNAPSHOT);
+                stream_config_info.postprocess_mask[stream_config_info.num_streams] =
+                        mStreamPpMask[CAM_STREAM_TYPE_SNAPSHOT];
+                stream_config_info.num_streams++;
+            }
+
+            if (previewAsPostview) {
+                stream_config_info.type[stream_config_info.num_streams] =
+                        CAM_STREAM_TYPE_PREVIEW;
+                getStreamDimension(CAM_STREAM_TYPE_PREVIEW,
+                        stream_config_info.stream_sizes[stream_config_info.num_streams]);
+                updatePpFeatureMask(CAM_STREAM_TYPE_PREVIEW);
+                stream_config_info.postprocess_mask[stream_config_info.num_streams] =
+                        mStreamPpMask[CAM_STREAM_TYPE_PREVIEW];
+                stream_config_info.num_streams++;
+            } else {
+                stream_config_info.type[stream_config_info.num_streams] =
+                        CAM_STREAM_TYPE_POSTVIEW;
+                getStreamDimension(CAM_STREAM_TYPE_POSTVIEW,
+                        stream_config_info.stream_sizes[stream_config_info.num_streams]);
+                updatePpFeatureMask(CAM_STREAM_TYPE_POSTVIEW);
+                stream_config_info.postprocess_mask[stream_config_info.num_streams] =
+                        mStreamPpMask[CAM_STREAM_TYPE_POSTVIEW];
+                stream_config_info.num_streams++;
+            }
+        } else {
+            raw_capture = true;
+            stream_config_info.type[stream_config_info.num_streams] =
+                    CAM_STREAM_TYPE_RAW;
+            getStreamDimension(CAM_STREAM_TYPE_RAW,
+                    stream_config_info.stream_sizes[stream_config_info.num_streams]);
+            updatePpFeatureMask(CAM_STREAM_TYPE_RAW);
+            stream_config_info.postprocess_mask[stream_config_info.num_streams] =
+                    mStreamPpMask[CAM_STREAM_TYPE_RAW];
+            stream_config_info.num_streams++;
+        }
+    }
+    if (raw_yuv && !raw_capture && (isZSLMode() ||
+            (getofflineRAW() && isCapture && !getRecordingHintValue()))) {
+        cam_dimension_t max_dim = {0,0};
+        updateRAW(max_dim);
+        stream_config_info.type[stream_config_info.num_streams] =
+                CAM_STREAM_TYPE_RAW;
+        getStreamDimension(CAM_STREAM_TYPE_RAW,
+                stream_config_info.stream_sizes[stream_config_info.num_streams]);
+        updatePpFeatureMask(CAM_STREAM_TYPE_RAW);
+        stream_config_info.postprocess_mask[stream_config_info.num_streams] =
+                mStreamPpMask[CAM_STREAM_TYPE_RAW];
+        stream_config_info.num_streams++;
+    }
+    for (uint32_t k = 0; k < stream_config_info.num_streams; k++) {
+        ALOGI("%s: stream type %d, w x h: %d x %d, pp_mask: 0x%x", __func__,
+                stream_config_info.type[k],
+                stream_config_info.stream_sizes[k].width,
+                stream_config_info.stream_sizes[k].height,
+                stream_config_info.postprocess_mask[k]);
+    }
+
+    rc = sendStreamConfigInfo(stream_config_info);
+    m_bStreamsConfigured = true;
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : addOnlineRotation
+ *
+ * DESCRIPTION: send additional rotation information for specific stream
+ *
+ * PARAMETERS :
+ *   @rotation: rotation
+ *   @streamId: internal stream id
+ *   @device_rotation: device rotation
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::addOnlineRotation(uint32_t rotation, uint32_t streamId,
+        int32_t device_rotation)
+{
+    int32_t rc = NO_ERROR;
+    cam_rotation_info_t rotation_info;
+    memset(&rotation_info, 0, sizeof(cam_rotation_info_t));
+
+    /* Add jpeg rotation information */
+    if (rotation == 0) {
+        rotation_info.rotation = ROTATE_0;
+    } else if (rotation == 90) {
+        rotation_info.rotation = ROTATE_90;
+    } else if (rotation == 180) {
+        rotation_info.rotation = ROTATE_180;
+    } else if (rotation == 270) {
+        rotation_info.rotation = ROTATE_270;
+    } else {
+        rotation_info.rotation = ROTATE_0;
+    }
+    rotation_info.streamId = streamId;
+
+    /* Add device rotation information */
+    if (device_rotation == 0) {
+        rotation_info.device_rotation = ROTATE_0;
+    } else if (device_rotation == 90) {
+        rotation_info.device_rotation = ROTATE_90;
+    } else if (device_rotation == 180) {
+        rotation_info.device_rotation = ROTATE_180;
+    } else if (device_rotation == 270) {
+        rotation_info.device_rotation = ROTATE_270;
+    } else {
+        rotation_info.device_rotation = ROTATE_0;
+    }
+
+    if(initBatchUpdate(m_pParamBuf) < 0 ) {
+        ALOGE("%s:Failed to initialize group update table", __func__);
+        return BAD_TYPE;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_ROTATION, rotation_info)) {
+        ALOGE("%s:Failed to update table", __func__);
+        return BAD_VALUE;
+    }
+
+    rc = commitSetBatch();
+    if (rc != NO_ERROR) {
+        ALOGE("%s:Failed to set stream info parm", __func__);
+        return rc;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : needThumbnailReprocess
+ *
+ * DESCRIPTION: Check if thumbnail reprocessing is needed
+ *
+ * PARAMETERS : @pFeatureMask - feature mask
+ *
+ * RETURN     : true: needed
+ *              false: no need
+ *==========================================================================*/
+bool QCameraParameters::needThumbnailReprocess(uint32_t *pFeatureMask)
+{
+    if (isUbiFocusEnabled() || isChromaFlashEnabled() ||
+            isOptiZoomEnabled() || isUbiRefocus() ||
+            isStillMoreEnabled()) {
+        *pFeatureMask &= ~CAM_QCOM_FEATURE_CHROMA_FLASH;
+        *pFeatureMask &= ~CAM_QCOM_FEATURE_UBIFOCUS;
+        *pFeatureMask &= ~CAM_QCOM_FEATURE_REFOCUS;
+        *pFeatureMask &= ~CAM_QCOM_FEATURE_OPTIZOOM;
+        *pFeatureMask &= ~CAM_QCOM_FEATURE_STILLMORE;
+        return false;
+    } else {
+        return true;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getNumOfExtraBuffersForImageProc
+ *
+ * DESCRIPTION: get number of extra input buffers needed by image processing
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : number of extra buffers needed by ImageProc;
+ *              0 if not ImageProc enabled
+ *==========================================================================*/
+uint8_t QCameraParameters::getNumOfExtraBuffersForImageProc()
+{
+    int numOfBufs = 0;
+
+    if (isUbiRefocus()) {
+        return (uint8_t)(m_pCapability->refocus_af_bracketing_need.burst_count - 1);
+    } else if (isUbiFocusEnabled()) {
+        numOfBufs += m_pCapability->ubifocus_af_bracketing_need.burst_count - 1;
+    } else if (m_bOptiZoomOn) {
+        numOfBufs += m_pCapability->opti_zoom_settings_need.burst_count - 1;
+    } else if (isChromaFlashEnabled()) {
+        numOfBufs += m_pCapability->chroma_flash_settings_need.burst_count - 1;
+    } else if (isStillMoreEnabled()) {
+        if (isSeeMoreEnabled()) {
+            m_stillmore_config.burst_count = 1;
+        } else if ((m_stillmore_config.burst_count >=
+                m_pCapability->stillmore_settings_need.min_burst_count) &&
+                (m_stillmore_config.burst_count <=
+                m_pCapability->stillmore_settings_need.max_burst_count)) {
+            numOfBufs += m_stillmore_config.burst_count - 1;
+        } else {
+            numOfBufs += m_pCapability->stillmore_settings_need.burst_count - 1;
+        }
+    }
+
+    return (uint8_t)(numOfBufs * getBurstNum());
+}
+
+/*===========================================================================
+ * FUNCTION   : getExifBufIndex
+ *
+ * DESCRIPTION: get index of metadata to be used for EXIF
+ *
+ * PARAMETERS : @captureIndex - index of current captured frame
+ *
+ * RETURN     : index of metadata to be used for EXIF
+ *==========================================================================*/
+uint32_t QCameraParameters::getExifBufIndex(uint32_t captureIndex)
+{
+    uint32_t index = captureIndex;
+
+    if (isUbiRefocus()) {
+        if (captureIndex < m_pCapability->refocus_af_bracketing_need.burst_count) {
+            index = captureIndex;
+        } else {
+            index = 0;
+        }
+    } else if (isChromaFlashEnabled()) {
+        index = m_pCapability->chroma_flash_settings_need.metadata_index;
+    } else if (isHDREnabled()) {
+        if (isHDR1xFrameEnabled() && isHDR1xExtraBufferNeeded()) {
+            index = m_pCapability->hdr_bracketing_setting.num_frames;
+        } else {
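+            // Pick the metadata of the 0 EV (1x exposure) frame in the HDR
+            // bracketing sequence; fall back to the capture index if no such
+            // frame is found.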
+            for (index = 0; index < m_pCapability->hdr_bracketing_setting.num_frames; index++) {
+                if (0 == m_pCapability->hdr_bracketing_setting.exp_val.values[index]) {
+                    break;
+                }
+            }
+            if (index == m_pCapability->hdr_bracketing_setting.num_frames) {
+                index = captureIndex;
+            }
+        }
+    }
+
+    return index;
+}
+
+/*===========================================================================
+ * FUNCTION   : getNumberInBufsForSingleShot
+ *
+ * DESCRIPTION: get number of input buffers for single shot
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : number of input buffers for single shot
+ *==========================================================================*/
+uint32_t QCameraParameters::getNumberInBufsForSingleShot()
+{
+    uint32_t numOfBufs = 1;
+
+    if (isUbiRefocus()) {
+        numOfBufs = m_pCapability->refocus_af_bracketing_need.burst_count;
+    } else if (isUbiFocusEnabled()) {
+        numOfBufs = m_pCapability->ubifocus_af_bracketing_need.burst_count;
+    } else if (m_bOptiZoomOn) {
+        numOfBufs = m_pCapability->opti_zoom_settings_need.burst_count;
+    } else if (isChromaFlashEnabled()) {
+        numOfBufs = m_pCapability->chroma_flash_settings_need.burst_count;
+    } else if (isHDREnabled()) {
+        numOfBufs = m_pCapability->hdr_bracketing_setting.num_frames;
+        if (isHDR1xFrameEnabled() && isHDR1xExtraBufferNeeded()) {
+            numOfBufs++;
+        }
+    } else if (isStillMoreEnabled()) {
+        if (isSeeMoreEnabled()) {
+            m_stillmore_config.burst_count = 1;
+            numOfBufs = m_stillmore_config.burst_count;
+        } else if ((m_stillmore_config.burst_count >=
+                m_pCapability->stillmore_settings_need.min_burst_count) &&
+                (m_stillmore_config.burst_count <=
+                m_pCapability->stillmore_settings_need.max_burst_count)) {
+            numOfBufs = m_stillmore_config.burst_count;
+        } else {
+            numOfBufs = m_pCapability->stillmore_settings_need.burst_count;
+        }
+    }
+
+    return numOfBufs;
+}
+
+/*===========================================================================
+ * FUNCTION   : getNumberOutBufsForSingleShot
+ *
+ * DESCRIPTION: get number of output buffers for single shot
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : number of output buffers for single shot
+ *==========================================================================*/
+uint32_t QCameraParameters::getNumberOutBufsForSingleShot()
+{
+    uint32_t numOfBufs = 1;
+
+    if (isUbiRefocus()) {
+        numOfBufs = m_pCapability->refocus_af_bracketing_need.output_count;
+    } else if (isHDREnabled()) {
+        if (isHDR1xFrameEnabled()) {
+            numOfBufs++;
+        }
+    }
+
+    return numOfBufs;
+}
+
+/*===========================================================================
+ * FUNCTION   : is4k2kVideoResolution
+ *
+ * DESCRIPTION: if video resolution is 4K UHD (3840x2160) or true 4K
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : true: video resolution is 4k x 2k
+ *              false: video resolution is not 4k x 2k
+ *==========================================================================*/
+bool QCameraParameters::is4k2kVideoResolution()
+{
+   bool enabled = false;
+   cam_dimension_t resolution;
+   getVideoSize(&resolution.width, &resolution.height);
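+   // Anything at least 3840 wide or 2160 tall is treated as 4K here.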
+   if (!(resolution.width < 3840 && resolution.height < 2160)) {
+      enabled = true;
+   }
+
+   return enabled;
+}
+
+/*===========================================================================
+ * FUNCTION   : updateDebugLevel
+ *
+ * DESCRIPTION: send CAM_INTF_PARM_UPDATE_DEBUG_LEVEL to backend
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : NO_ERROR --success
+ *              int32_t type of status
+ *==========================================================================*/
+int32_t QCameraParameters::updateDebugLevel()
+{
+    if ( m_pParamBuf == NULL ) {
+        return NO_INIT;
+    }
+
+    int32_t rc = initBatchUpdate(m_pParamBuf);
+    if ( rc != NO_ERROR ) {
+        ALOGE("%s:Failed to initialize group update table", __func__);
+        return rc;
+    }
+
+    uint32_t dummyDebugLevel = 0;
+    /* The value of dummyDebugLevel is irrelevant. On
+     * CAM_INTF_PARM_UPDATE_DEBUG_LEVEL, the debug property is read */
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_UPDATE_DEBUG_LEVEL, dummyDebugLevel)) {
+        ALOGE("%s: Parameters batch failed",__func__);
+        return BAD_VALUE;
+    }
+
+    rc = commitSetBatch();
+    if ( rc != NO_ERROR ) {
+        ALOGE("%s:Failed to commit batch parameters", __func__);
+        return rc;
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setOfflineRAW
+ *
+ * DESCRIPTION: Function to decide Offline RAW feature.
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraParameters::setOfflineRAW()
+{
+   char value[PROPERTY_VALUE_MAX];
+   bool raw_yuv = false;
+   bool offlineRaw = false;
+
+   property_get("persist.camera.raw_yuv", value, "0");
+   raw_yuv = atoi(value) > 0 ? true : false;
+   property_get("persist.camera.offlineraw", value, "0");
+   offlineRaw = atoi(value) > 0 ? true : false;
+   if ((raw_yuv || isRdiMode()) && offlineRaw) {
+       mOfflineRAW = true;
+   } else {
+       mOfflineRAW = false;
+   }
+   CDBG_HIGH("%s: Offline Raw %d", __func__, mOfflineRAW);
+}
+
+/*===========================================================================
+ * FUNCTION   : updatePpFeatureMask
+ *
+ * DESCRIPTION: Updates the feature mask for a particular stream depending
+ *              on current client configuration.
+ *
+ * PARAMETERS :
+ *  @stream_type: Camera stream type
+ *
+ * RETURN     : NO_ERROR --success
+ *              int32_t type of status
+ *==========================================================================*/
+int32_t QCameraParameters::updatePpFeatureMask(cam_stream_type_t stream_type) {
+
+    uint32_t feature_mask = 0;
+
+    if (stream_type >= CAM_STREAM_TYPE_MAX) {
+        ALOGE("%s: Error!! stream type: %d not valid", __func__, stream_type);
+        return -1;
+    }
+
+    // Update feature mask for SeeMore in video and video preview
+    if (isSeeMoreEnabled() &&
+            !is4k2kVideoResolution() &&
+            ((stream_type == CAM_STREAM_TYPE_VIDEO) ||
+            (stream_type == CAM_STREAM_TYPE_PREVIEW && getRecordingHintValue()))) {
+       feature_mask |= CAM_QCOM_FEATURE_LLVD;
+    }
+
+    // Do not enable feature mask for ZSL/non-ZSL/liveshot snapshot except for 4K2k case
+    if ((getRecordingHintValue() &&
+            (stream_type == CAM_STREAM_TYPE_SNAPSHOT) && is4k2kVideoResolution()) ||
+            (stream_type != CAM_STREAM_TYPE_SNAPSHOT)) {
+        if ((m_nMinRequiredPpMask & CAM_QCOM_FEATURE_SHARPNESS) &&
+                !isOptiZoomEnabled()) {
+            feature_mask |= CAM_QCOM_FEATURE_SHARPNESS;
+        }
+
+        if (m_nMinRequiredPpMask & CAM_QCOM_FEATURE_EFFECT) {
+            feature_mask |= CAM_QCOM_FEATURE_EFFECT;
+        }
+        if (isWNREnabled() && (getRecordingHintValue() == false)) {
+            feature_mask |= CAM_QCOM_FEATURE_DENOISE2D;
+        }
+
+        //Set flip mode based on Stream type;
+        int flipMode = getFlipMode(stream_type);
+        if (flipMode > 0) {
+            feature_mask |= CAM_QCOM_FEATURE_FLIP;
+        }
+    }
+
+    if ((isTNRVideoEnabled() && (CAM_STREAM_TYPE_VIDEO == stream_type))
+            || (isTNRPreviewEnabled() && (CAM_STREAM_TYPE_PREVIEW == stream_type))) {
+        feature_mask |= CAM_QCOM_FEATURE_CPP_TNR;
+    }
+
+    //Rotation could also have an effect on pp feature mask
+    cam_pp_feature_config_t config;
+    cam_dimension_t dim;
+    memset(&config, 0, sizeof(cam_pp_feature_config_t));
+    getStreamRotation(stream_type, config, dim);
+    feature_mask |= config.feature_mask;
+
+    // Store stream feature mask
+    setStreamPpMask(stream_type, feature_mask);
+    CDBG_HIGH("%s: stream type: %d, pp_mask: 0x%x", __func__, stream_type, feature_mask);
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setStreamPpMask
+ *
+ * DESCRIPTION: Stores a particular feature mask for a given camera stream
+ *
+ * PARAMETERS :
+ *  @stream_type: Camera stream type
+ *  @pp_mask  : Feature mask
+ *
+ * RETURN     : NO_ERROR --success
+ *              int32_t type of status
+ *==========================================================================*/
+int32_t QCameraParameters::setStreamPpMask(cam_stream_type_t stream_type,
+        uint32_t pp_mask) {
+
+    if(stream_type >= CAM_STREAM_TYPE_MAX) {
+        return BAD_TYPE;
+    }
+
+    mStreamPpMask[stream_type] = pp_mask;
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : getStreamPpMask
+ *
+ * DESCRIPTION: Retrieves the feature mask for a given camera stream
+ *
+ * PARAMETERS :
+ *  @stream_type: Camera stream type
+ *  @pp_mask  : Feature mask
+ *
+ * RETURN     : NO_ERROR --success
+ *              int32_t type of status
+ *==========================================================================*/
+int32_t QCameraParameters::getStreamPpMask(cam_stream_type_t stream_type,
+        uint32_t &pp_mask) {
+
+    if(stream_type >= CAM_STREAM_TYPE_MAX) {
+        return BAD_TYPE;
+    }
+
+    pp_mask = mStreamPpMask[stream_type];
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setReprocCount
+ *
+ * DESCRIPTION: Set total reprocessing pass count
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCameraParameters::setReprocCount()
+{
+    mTotalPPCount = 1; //Default reprocessing Pass count
+    char value[PROPERTY_VALUE_MAX];
+    int multpass = 0;
+
+    property_get("persist.camera.multi_pass", value, "0");
+    multpass = atoi(value);
+
+    if ( multpass == 0 ) {
+        return;
+    }
+
+    if ((getZoomLevel() != 0) && (isZSLMode())) {
+        ALOGW("Zoom Present. Need 2nd pass for post processing");
+        mTotalPPCount++;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : setBufBatchCount
+ *
+ * DESCRIPTION: Function to configure batch buffer
+ *
+ * PARAMETERS : int8_t buf_cnt
+ *                     Buffer batch count
+ *
+ * RETURN     :  None
+ *==========================================================================*/
+void QCameraParameters::setBufBatchCount(int8_t buf_cnt)
+{
+    mBufBatchCnt = 0;
+    char value[PROPERTY_VALUE_MAX];
+    int8_t count = 0;
+
+    property_get("persist.camera.batchcount", value, "0");
+    count = atoi(value);
+
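+    // Nothing to do unless a property override is set or the requested
+    // count exceeds CAMERA_MIN_BATCH_COUNT.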
+    if (!(count != 0 || buf_cnt > CAMERA_MIN_BATCH_COUNT)) {
+        CDBG_HIGH("%s : Buffer batch count = %d", __func__, mBufBatchCnt);
+        return;
+    }
+
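+    // Halve the requested count until it fits within the sensor's maximum
+    // supported batch size (hypothetical example: 16 -> 8 -> 4 for a max of 4).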
+    while((m_pCapability->max_batch_bufs_supported != 0)
+            && (m_pCapability->max_batch_bufs_supported < buf_cnt)) {
+        buf_cnt = buf_cnt / 2;
+    }
+
+    if (count > 0) {
+        mBufBatchCnt = count;
+        CDBG_HIGH("%s : Buffer batch count = %d", __func__, mBufBatchCnt);
+        return;
+    }
+
+    if (buf_cnt > CAMERA_MIN_BATCH_COUNT) {
+        mBufBatchCnt = buf_cnt;
+        CDBG_HIGH("%s : Buffer batch count = %d", __func__, mBufBatchCnt);
+        return;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : dump
+ *
+ * DESCRIPTION: Composes a string based on current configuration
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : Formatted string
+ *==========================================================================*/
+String8 QCameraParameters::dump()
+{
+    String8 str("\n");
+    char s[128];
+
+    snprintf(s, 128, "Preview Pixel Fmt: %d\n", getPreviewHalPixelFormat());
+    str += s;
+
+    snprintf(s, 128, "ZSL Burst Interval: %d\n", getZSLBurstInterval());
+    str += s;
+
+    snprintf(s, 128, "ZSL Queue Depth: %d\n", getZSLQueueDepth());
+    str += s;
+
+    snprintf(s, 128, "ZSL Back Look Count %d\n", getZSLBackLookCount());
+    str += s;
+
+    snprintf(s, 128, "Max Unmatched Frames In Queue: %d\n",
+        getMaxUnmatchedFramesInQueue());
+    str += s;
+
+    snprintf(s, 128, "Is ZSL Mode: %d\n", isZSLMode());
+    str += s;
+
+    snprintf(s, 128, "Is No Display Mode: %d\n", isNoDisplayMode());
+    str += s;
+
+    snprintf(s, 128, "Is WNR Enabled: %d\n", isWNREnabled());
+    str += s;
+
+    snprintf(s, 128, "isHfrMode: %d\n", isHfrMode());
+    str += s;
+
+    snprintf(s, 128, "getNumOfSnapshots: %d\n", getNumOfSnapshots());
+    str += s;
+
+    snprintf(s, 128, "getNumOfExtraHDRInBufsIfNeeded: %d\n",
+        getNumOfExtraHDRInBufsIfNeeded());
+    str += s;
+
+    snprintf(s, 128, "getNumOfExtraHDROutBufsIfNeeded: %d\n",
+        getNumOfExtraHDROutBufsIfNeeded());
+    str += s;
+
+    snprintf(s, 128, "getBurstNum: %d\n", getBurstNum());
+    str += s;
+
+    snprintf(s, 128, "getRecordingHintValue: %d\n", getRecordingHintValue());
+    str += s;
+
+    snprintf(s, 128, "getJpegQuality: %u\n", getJpegQuality());
+    str += s;
+
+    snprintf(s, 128, "getJpegRotation: %u\n", getJpegRotation());
+    str += s;
+
+    snprintf(s, 128, "isHistogramEnabled: %d\n", isHistogramEnabled());
+    str += s;
+
+    snprintf(s, 128, "isFaceDetectionEnabled: %d\n", isFaceDetectionEnabled());
+    str += s;
+
+    snprintf(s, 128, "isHDREnabled: %d\n", isHDREnabled());
+    str += s;
+
+    snprintf(s, 128, "isAutoHDREnabled: %d\n", isAutoHDREnabled());
+    str += s;
+
+    snprintf(s, 128, "isAVTimerEnabled: %d\n", isAVTimerEnabled());
+    str += s;
+
+    snprintf(s, 128, "getFocusMode: %d\n", getFocusMode());
+    str += s;
+
+    snprintf(s, 128, "isJpegPictureFormat: %d\n", isJpegPictureFormat());
+    str += s;
+
+    snprintf(s, 128, "isNV16PictureFormat: %d\n", isNV16PictureFormat());
+    str += s;
+
+    snprintf(s, 128, "isNV21PictureFormat: %d\n", isNV21PictureFormat());
+    str += s;
+
+    snprintf(s, 128, "isSnapshotFDNeeded: %d\n", isSnapshotFDNeeded());
+    str += s;
+
+    snprintf(s, 128, "isHDR1xFrameEnabled: %d\n", isHDR1xFrameEnabled());
+    str += s;
+
+    snprintf(s, 128, "isYUVFrameInfoNeeded: %d\n", isYUVFrameInfoNeeded());
+    str += s;
+
+    snprintf(s, 128, "isHDR1xExtraBufferNeeded: %d\n",
+        isHDR1xExtraBufferNeeded());
+    str += s;
+
+    snprintf(s, 128, "isHDROutputCropEnabled: %d\n", isHDROutputCropEnabled());
+    str += s;
+
+    snprintf(s, 128, "isPreviewFlipChanged: %d\n", isPreviewFlipChanged());
+    str += s;
+
+    snprintf(s, 128, "isVideoFlipChanged: %d\n", isVideoFlipChanged());
+    str += s;
+
+    snprintf(s, 128, "isSnapshotFlipChanged: %d\n", isSnapshotFlipChanged());
+    str += s;
+
+    snprintf(s, 128, "isHDRThumbnailProcessNeeded: %d\n",
+        isHDRThumbnailProcessNeeded());
+    str += s;
+
+    snprintf(s, 128, "getAutoFlickerMode: %d\n", getAutoFlickerMode());
+    str += s;
+
+    snprintf(s, 128, "getNumOfExtraBuffersForImageProc: %d\n",
+        getNumOfExtraBuffersForImageProc());
+    str += s;
+
+    snprintf(s, 128, "isUbiFocusEnabled: %d\n", isUbiFocusEnabled());
+    str += s;
+
+    snprintf(s, 128, "isChromaFlashEnabled: %d\n", isChromaFlashEnabled());
+    str += s;
+
+    snprintf(s, 128, "isOptiZoomEnabled: %d\n", isOptiZoomEnabled());
+    str += s;
+
+    snprintf(s, 128, "isStillMoreEnabled: %d\n", isStillMoreEnabled());
+    str += s;
+
+    snprintf(s, 128, "getBurstCountForAdvancedCapture: %d\n",
+        getBurstCountForAdvancedCapture());
+    str += s;
+
+    return str;
+}
+
+/*===========================================================================
+ * FUNCTION   : getNumOfExtraBuffersForVideo
+ *
+ * DESCRIPTION: get number of extra buffers needed by image processing
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : number of extra video buffers needed;
+ *              0 if SeeMore is not enabled
+ *==========================================================================*/
+uint8_t QCameraParameters::getNumOfExtraBuffersForVideo()
+{
+    uint8_t numOfBufs = 0;
+
+    if (isSeeMoreEnabled()) {
+        numOfBufs = 1;
+    }
+
+    return numOfBufs;
+}
+
+/*===========================================================================
+ * FUNCTION   : getNumOfExtraBuffersForPreview
+ *
+ * DESCRIPTION: get number of extra buffers needed by image processing
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : number of extra preview buffers needed;
+ *              0 if SeeMore is not enabled
+ *==========================================================================*/
+uint8_t QCameraParameters::getNumOfExtraBuffersForPreview()
+{
+    uint8_t numOfBufs = 0;
+
+    if (isSeeMoreEnabled() && !isZSLMode() && getRecordingHintValue()) {
+        numOfBufs = 1;
+    }
+
+    return numOfBufs;
+}
+
+/*===========================================================================
+ * FUNCTION   : setToneMapMode
+ *
+ * DESCRIPTION: enable or disable tone map
+ *
+ * PARAMETERS :
+ *   @enable : enable: 1; disable 0
+ *   @initCommit: if configuration list needs to be initialized and committed
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setToneMapMode(uint32_t enable, bool initCommit)
+{
+    int32_t rc = NO_ERROR;
+    CDBG_HIGH("%s: tone map mode %d ", __func__, enable);
+
+    if (initCommit) {
+        if (initBatchUpdate(m_pParamBuf) < 0) {
+            ALOGE("%s:Failed to initialize group update table", __func__);
+            return FAILED_TRANSACTION;
+        }
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_TONE_MAP_MODE, enable)) {
+        ALOGE("%s:Failed to update tone map mode", __func__);
+        return BAD_VALUE;
+    }
+
+    if (initCommit) {
+        rc = commitSetBatch();
+        if (rc != NO_ERROR) {
+            ALOGE("%s:Failed to commit tone map mode", __func__);
+            return rc;
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setCDSMode
+ *
+ * DESCRIPTION: set CDS mode
+ *
+ * PARAMETERS :
+ *   @cds_mode : cds mode
+ *   @initCommit: if configuration list needs to be initialized and committed
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraParameters::setCDSMode(int32_t cds_mode, bool initCommit)
+{
+    if (initCommit) {
+        if (initBatchUpdate(m_pParamBuf) < 0) {
+            ALOGE("%s:Failed to initialize group update table", __func__);
+            return FAILED_TRANSACTION;
+        }
+    }
+
+    int32_t rc = NO_ERROR;
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(m_pParamBuf, CAM_INTF_PARM_CDS_MODE, cds_mode)) {
+        ALOGE("%s:Failed to update cds mode", __func__);
+        return BAD_VALUE;
+    }
+
+    if (initCommit) {
+        rc = commitSetBatch();
+        if (NO_ERROR != rc) {
+            ALOGE("%s:Failed to set cds mode", __func__);
+            return rc;
+        }
+    }
+
+    CDBG_HIGH(" cds mode -> %d", cds_mode);
+
+    return rc;
+}
+
+}; // namespace qcamera
diff --git a/camera/QCamera2/HAL/QCameraParameters.h b/camera/QCamera2/HAL/QCameraParameters.h
new file mode 100644
index 0000000..e248313
--- /dev/null
+++ b/camera/QCamera2/HAL/QCameraParameters.h
@@ -0,0 +1,1067 @@
+/*
+** Copyright 2008, The Android Open Source Project
+** Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+** Not a Contribution. Apache license notifications and license are
+** retained for attribution purposes only.
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+#ifndef ANDROID_HARDWARE_QCAMERA_PARAMETERS_H
+#define ANDROID_HARDWARE_QCAMERA_PARAMETERS_H
+
+#include <camera/CameraParameters.h>
+#include <cutils/properties.h>
+#include <hardware/camera.h>
+#include <stdlib.h>
+#include <utils/Errors.h>
+#include "cam_intf.h"
+#include "cam_types.h"
+#include "QCameraMem.h"
+#include "QCameraThermalAdapter.h"
+
+extern "C" {
+#include <mm_jpeg_interface.h>
+}
+
+using namespace android;
+
+namespace qcamera {
+
+//EXIF globals
+static const char ExifAsciiPrefix[] = { 0x41, 0x53, 0x43, 0x49, 0x49, 0x0, 0x0, 0x0 };          // "ASCII\0\0\0"
+static const char ExifUndefinedPrefix[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };   // "\0\0\0\0\0\0\0\0"
+
+#define EXIF_ASCII_PREFIX_SIZE           8   //(sizeof(ExifAsciiPrefix))
+#define FOCAL_LENGTH_DECIMAL_PRECISION   100
+
+#define CAMERA_MIN_BATCH_COUNT           1
+
+class QCameraAdjustFPS
+{
+public:
+    virtual int recalcFPSRange(int &minFPS, int &maxFPS,
+            cam_fps_range_t &adjustedRange) = 0;
+    virtual ~QCameraAdjustFPS() {}
+};
+
+class QCameraParameters;
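+// Typical flow (illustrative): the owning QCameraParameters enables scaling,
+// supplies the scale and sensor tables via setScaleSizeTbl(), validates an
+// APK-requested picture size with setValidatePicSize(), and then queries
+// getPicSizeSetted() for the dimension actually configured on the VFE.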
+class QCameraReprocScaleParam{
+public:
+    QCameraReprocScaleParam(QCameraParameters *parent);
+    virtual ~QCameraReprocScaleParam();
+
+    virtual void setScaleEnable(bool enabled);
+    virtual int32_t setScaleSizeTbl(size_t scale_cnt,
+            cam_dimension_t *scale_tbl, size_t org_cnt,
+            cam_dimension_t *org_tbl);
+    virtual int32_t setValidatePicSize(int &width, int &height);
+
+    virtual bool isScaleEnabled();
+    virtual bool isUnderScaling();
+
+
+    virtual size_t getScaleSizeTblCnt();
+    virtual cam_dimension_t *getScaledSizeTbl();
+    virtual size_t getTotalSizeTblCnt();
+    virtual cam_dimension_t *getTotalSizeTbl();
+    virtual int32_t getPicSizeFromAPK(int &width, int &height);
+    virtual int32_t getPicSizeSetted(int &width, int &height);
+
+private:
+    bool isScalePicSize(int width, int height);
+    bool isValidatePicSize(int width, int height);
+    int32_t setSensorSupportedPicSize();
+    size_t checkScaleSizeTable(size_t scale_cnt, cam_dimension_t *scale_tbl,
+            size_t org_cnt, cam_dimension_t *org_tbl);
+
+    QCameraParameters *mParent;
+    bool mScaleEnabled;
+    bool mIsUnderScaling;   // whether scaling is currently active
+    bool mScaleDirection;   // 0: Upscaling; 1: Downscaling
+
+    // picture size cnt that need scale operation
+    size_t mNeedScaleCnt;
+    cam_dimension_t mNeedScaledSizeTbl[MAX_SCALE_SIZES_CNT];
+
+    // sensor supported size cnt and table
+    size_t mSensorSizeTblCnt;
+    cam_dimension_t *mSensorSizeTbl;
+
+    // Total size cnt (sensor supported + need scale cnt)
+    size_t mTotalSizeTblCnt;
+    cam_dimension_t mTotalSizeTbl[MAX_SIZES_CNT];
+
+    cam_dimension_t mPicSizeFromAPK;   // dimension that the APK expects
+    cam_dimension_t mPicSizeSetted;    // dimension used to configure the VFE
+};
+
+class QCameraParameters: public CameraParameters
+{
+public:
+    QCameraParameters();
+    QCameraParameters(const String8 &params);
+    ~QCameraParameters();
+
+    // Supported PREVIEW/RECORDING SIZES IN HIGH FRAME RATE recording, sizes in pixels.
+    // Example value: "800x480,432x320". Read only.
+    static const char KEY_QC_SUPPORTED_HFR_SIZES[];
+    // The mode of preview frame rate.
+    // Example value: "frame-rate-auto, frame-rate-fixed".
+    static const char KEY_QC_PREVIEW_FRAME_RATE_MODE[];
+    static const char KEY_QC_SUPPORTED_PREVIEW_FRAME_RATE_MODES[];
+    static const char KEY_QC_PREVIEW_FRAME_RATE_AUTO_MODE[];
+    static const char KEY_QC_PREVIEW_FRAME_RATE_FIXED_MODE[];
+    static const char KEY_QC_SUPPORTED_SKIN_TONE_ENHANCEMENT_MODES[] ;
+
+    // Supported live snapshot sizes
+    static const char KEY_QC_SUPPORTED_LIVESNAPSHOT_SIZES[];
+
+    // Supported Raw formats
+    static const char KEY_QC_SUPPORTED_RAW_FORMATS[];
+    static const char KEY_QC_RAW_FORMAT[];
+
+    //Touch Af/AEC settings.
+    static const char KEY_QC_TOUCH_AF_AEC[];
+    static const char KEY_QC_SUPPORTED_TOUCH_AF_AEC[];
+    //Touch Index for AEC.
+    static const char KEY_QC_TOUCH_INDEX_AEC[];
+    //Touch Index for AF.
+    static const char KEY_QC_TOUCH_INDEX_AF[];
+    // Current auto scene detection mode.
+    // Example value: "off" or "on" constants. Read/write.
+    static const char KEY_QC_SCENE_DETECT[];
+    // Supported auto scene detection settings.
+    // Example value: "off,on". Read only.
+    static const char KEY_QC_SUPPORTED_SCENE_DETECT[];
+    static const char KEY_QC_SELECTABLE_ZONE_AF[];
+
+    static const char KEY_QC_ISO_MODE[];
+    static const char KEY_QC_CONTINUOUS_ISO[];
+    static const char KEY_QC_MIN_ISO[];
+    static const char KEY_QC_MAX_ISO[];
+    static const char KEY_QC_SUPPORTED_ISO_MODES[];
+    static const char KEY_QC_EXPOSURE_TIME[];
+    static const char KEY_QC_MIN_EXPOSURE_TIME[];
+    static const char KEY_QC_MAX_EXPOSURE_TIME[];
+    static const char KEY_QC_LENSSHADE[] ;
+    static const char KEY_QC_SUPPORTED_LENSSHADE_MODES[] ;
+    static const char KEY_QC_AUTO_EXPOSURE[];
+    static const char KEY_QC_SUPPORTED_AUTO_EXPOSURE[];
+
+    static const char KEY_QC_GPS_LATITUDE_REF[];
+    static const char KEY_QC_GPS_LONGITUDE_REF[];
+    static const char KEY_QC_GPS_ALTITUDE_REF[];
+    static const char KEY_QC_GPS_STATUS[];
+    static const char KEY_QC_MEMORY_COLOR_ENHANCEMENT[];
+    static const char KEY_QC_SUPPORTED_MEM_COLOR_ENHANCE_MODES[];
+    static const char KEY_QC_DIS[];
+    static const char KEY_QC_OIS[];
+    static const char KEY_QC_SUPPORTED_DIS_MODES[];
+    static const char KEY_QC_SUPPORTED_OIS_MODES[];
+
+    static const char KEY_QC_ZSL[];
+    static const char KEY_QC_SUPPORTED_ZSL_MODES[];
+    static const char KEY_QC_ZSL_BURST_INTERVAL[];
+    static const char KEY_QC_ZSL_BURST_LOOKBACK[];
+    static const char KEY_QC_ZSL_QUEUE_DEPTH[];
+
+    static const char KEY_QC_CAMERA_MODE[];
+    static const char KEY_QC_ORIENTATION[];
+
+    static const char KEY_QC_VIDEO_HIGH_FRAME_RATE[];
+    static const char KEY_QC_VIDEO_HIGH_SPEED_RECORDING[];
+    static const char KEY_QC_SUPPORTED_VIDEO_HIGH_FRAME_RATE_MODES[];
+    static const char KEY_QC_HIGH_DYNAMIC_RANGE_IMAGING[];
+    static const char KEY_QC_SUPPORTED_HDR_IMAGING_MODES[];
+    static const char KEY_QC_AE_BRACKET_HDR[];
+    static const char KEY_QC_SUPPORTED_AE_BRACKET_MODES[];
+    static const char KEY_QC_CAPTURE_BURST_EXPOSURE[];
+    static const char KEY_QC_NUM_SNAPSHOT_PER_SHUTTER[];
+    static const char KEY_QC_NUM_RETRO_BURST_PER_SHUTTER[];
+    static const char KEY_QC_SNAPSHOT_BURST_LED_ON_PERIOD[];
+    static const char KEY_QC_SNAPSHOT_BURST_NUM[];
+    static const char KEY_QC_NO_DISPLAY_MODE[];
+    static const char KEY_QC_RAW_PICUTRE_SIZE[];
+    static const char KEY_QC_TINTLESS_ENABLE[];
+    static const char KEY_QC_SCENE_SELECTION[];
+    static const char KEY_QC_CDS_MODE[];
+    static const char KEY_QC_VIDEO_CDS_MODE[];
+    static const char KEY_QC_SUPPORTED_CDS_MODES[];
+    static const char KEY_QC_SUPPORTED_VIDEO_CDS_MODES[];
+    static const char KEY_QC_TNR_MODE[];
+    static const char KEY_QC_VIDEO_TNR_MODE[];
+    static const char KEY_QC_SUPPORTED_TNR_MODES[];
+    static const char KEY_QC_SUPPORTED_VIDEO_TNR_MODES[];
+
+    static const char KEY_INTERNAL_PERVIEW_RESTART[];
+    static const char KEY_QC_WB_MANUAL_CCT[];
+    static const char KEY_QC_MIN_WB_CCT[];
+    static const char KEY_QC_MAX_WB_CCT[];
+    static const char KEY_QC_MANUAL_WB_GAINS[];
+    static const char KEY_QC_MIN_WB_GAIN[];
+    static const char KEY_QC_MAX_WB_GAIN[];
+    static const char WHITE_BALANCE_MANUAL[];
+    static const char FOCUS_MODE_MANUAL_POSITION[];
+
+    static const char KEY_QC_MANUAL_FOCUS_POSITION[];
+    static const char KEY_QC_MANUAL_FOCUS_POS_TYPE[];
+    static const char KEY_QC_MIN_FOCUS_POS_INDEX[];
+    static const char KEY_QC_MAX_FOCUS_POS_INDEX[];
+    static const char KEY_QC_MIN_FOCUS_POS_DAC[];
+    static const char KEY_QC_MAX_FOCUS_POS_DAC[];
+    static const char KEY_QC_MIN_FOCUS_POS_RATIO[];
+    static const char KEY_QC_MAX_FOCUS_POS_RATIO[];
+    static const char KEY_QC_MIN_FOCUS_POS_DIOPTER[];
+    static const char KEY_QC_MAX_FOCUS_POS_DIOPTER[];
+    static const char KEY_QC_FOCUS_POSITION_SCALE[];
+    static const char KEY_QC_FOCUS_POSITION_DIOPTER[];
+
+    static const char KEY_QC_SUPPORTED_MANUAL_FOCUS_MODES[];
+    static const char KEY_QC_SUPPORTED_MANUAL_EXPOSURE_MODES[];
+    static const char KEY_QC_SUPPORTED_MANUAL_WB_MODES[];
+    static const char KEY_QC_FOCUS_SCALE_MODE[];
+    static const char KEY_QC_FOCUS_DIOPTER_MODE[];
+    static const char KEY_QC_ISO_PRIORITY[];
+    static const char KEY_QC_EXP_TIME_PRIORITY[];
+    static const char KEY_QC_USER_SETTING[];
+    static const char KEY_QC_WB_CCT_MODE[];
+    static const char KEY_QC_WB_GAIN_MODE[];
+    static const char KEY_QC_MANUAL_WB_TYPE[];
+    static const char KEY_QC_MANUAL_WB_VALUE[];
+    static const char KEY_QC_CURRENT_EXPOSURE_TIME[];
+    static const char KEY_QC_CURRENT_ISO[];
+
+    // DENOISE
+    static const char KEY_QC_DENOISE[];
+    static const char KEY_QC_SUPPORTED_DENOISE[];
+
+    //Selectable zone AF.
+    static const char KEY_QC_FOCUS_ALGO[];
+    static const char KEY_QC_SUPPORTED_FOCUS_ALGOS[];
+
+    //Face Detection
+    static const char KEY_QC_FACE_DETECTION[];
+    static const char KEY_QC_SUPPORTED_FACE_DETECTION[];
+
+    //Face Recognition
+    static const char KEY_QC_FACE_RECOGNITION[];
+    static const char KEY_QC_SUPPORTED_FACE_RECOGNITION[];
+
+    // supported camera features to be queried by Snapdragon SDK
+    //Read only
+    static const char KEY_QC_SUPPORTED_CAMERA_FEATURES[];
+
+    //Indicates number of faces requested by the application.
+    //This value will be rejected if the requested number of faces
+    //is greater than what the hardware supports.
+    //Write only.
+    static const char KEY_QC_MAX_NUM_REQUESTED_FACES[];
+
+    //preview flip
+    static const char KEY_QC_PREVIEW_FLIP[];
+    //video flip
+    static const char KEY_QC_VIDEO_FLIP[];
+    //snapshot picture flip
+    static const char KEY_QC_SNAPSHOT_PICTURE_FLIP[];
+
+    static const char KEY_QC_SUPPORTED_FLIP_MODES[];
+
+    //Face Detection, Facial processing requirement
+    static const char KEY_QC_SNAPSHOT_FD_DATA[];
+
+    //Auto HDR enable
+    static const char KEY_QC_AUTO_HDR_ENABLE[];
+    // video rotation
+    static const char KEY_QC_VIDEO_ROTATION[];
+    static const char KEY_QC_SUPPORTED_VIDEO_ROTATION_VALUES[];
+
+    //Redeye Reduction
+    static const char KEY_QC_REDEYE_REDUCTION[];
+    static const char KEY_QC_SUPPORTED_REDEYE_REDUCTION[];
+    static const char EFFECT_EMBOSS[];
+    static const char EFFECT_SKETCH[];
+    static const char EFFECT_NEON[];
+
+    //AF Bracketing
+    static const char KEY_QC_AF_BRACKET[];
+    static const char KEY_QC_SUPPORTED_AF_BRACKET_MODES[];
+
+    //Refocus
+    static const char KEY_QC_RE_FOCUS[];
+    static const char KEY_QC_SUPPORTED_RE_FOCUS_MODES[];
+
+    //Chroma Flash
+    static const char KEY_QC_CHROMA_FLASH[];
+    static const char KEY_QC_SUPPORTED_CHROMA_FLASH_MODES[];
+
+    //Opti Zoom
+    static const char KEY_QC_OPTI_ZOOM[];
+    static const char KEY_QC_SUPPORTED_OPTI_ZOOM_MODES[];
+
+    // Auto HDR supported
+    static const char KEY_QC_AUTO_HDR_SUPPORTED[];
+
+    // HDR modes
+    static const char KEY_QC_HDR_MODE[];
+    static const char KEY_QC_SUPPORTED_KEY_QC_HDR_MODES[];
+
+    //True Portrait
+    static const char KEY_QC_TRUE_PORTRAIT[];
+    static const char KEY_QC_SUPPORTED_TRUE_PORTRAIT_MODES[];
+
+    //See more
+    static const char KEY_QC_SEE_MORE[];
+    static const char KEY_QC_SUPPORTED_SEE_MORE_MODES[];
+
+    //Still more
+    static const char KEY_QC_STILL_MORE[];
+    static const char KEY_QC_SUPPORTED_STILL_MORE_MODES[];
+
+    //Longshot
+    static const char KEY_QC_LONGSHOT_SUPPORTED[];
+
+    //ZSL+HDR
+    static const char KEY_QC_ZSL_HDR_SUPPORTED[];
+
+    // Values for Touch AF/AEC
+    static const char TOUCH_AF_AEC_OFF[];
+    static const char TOUCH_AF_AEC_ON[];
+
+    // Values for Scene mode
+    static const char SCENE_MODE_ASD[];
+    static const char SCENE_MODE_BACKLIGHT[];
+    static const char SCENE_MODE_FLOWERS[];
+    static const char SCENE_MODE_AR[];
+    static const char SCENE_MODE_HDR[];
+    static const char PIXEL_FORMAT_YUV420SP_ADRENO[]; // ADRENO
+    static const char PIXEL_FORMAT_YV12[]; // YV12
+    static const char PIXEL_FORMAT_NV12[]; //NV12
+    static const char QC_PIXEL_FORMAT_NV12_VENUS[]; //NV12 VENUS
+
+    // Values for raw picture format
+    static const char QC_PIXEL_FORMAT_YUV_RAW_8BIT_YUYV[];
+    static const char QC_PIXEL_FORMAT_YUV_RAW_8BIT_YVYU[];
+    static const char QC_PIXEL_FORMAT_YUV_RAW_8BIT_UYVY[];
+    static const char QC_PIXEL_FORMAT_YUV_RAW_8BIT_VYUY[];
+    static const char QC_PIXEL_FORMAT_BAYER_QCOM_RAW_8GBRG[];
+    static const char QC_PIXEL_FORMAT_BAYER_QCOM_RAW_8GRBG[];
+    static const char QC_PIXEL_FORMAT_BAYER_QCOM_RAW_8RGGB[];
+    static const char QC_PIXEL_FORMAT_BAYER_QCOM_RAW_8BGGR[];
+    static const char QC_PIXEL_FORMAT_BAYER_QCOM_RAW_10GBRG[];
+    static const char QC_PIXEL_FORMAT_BAYER_QCOM_RAW_10GRBG[];
+    static const char QC_PIXEL_FORMAT_BAYER_QCOM_RAW_10RGGB[];
+    static const char QC_PIXEL_FORMAT_BAYER_QCOM_RAW_10BGGR[];
+    static const char QC_PIXEL_FORMAT_BAYER_QCOM_RAW_12GBRG[];
+    static const char QC_PIXEL_FORMAT_BAYER_QCOM_RAW_12GRBG[];
+    static const char QC_PIXEL_FORMAT_BAYER_QCOM_RAW_12RGGB[];
+    static const char QC_PIXEL_FORMAT_BAYER_QCOM_RAW_12BGGR[];
+    static const char QC_PIXEL_FORMAT_BAYER_MIPI_RAW_8GBRG[];
+    static const char QC_PIXEL_FORMAT_BAYER_MIPI_RAW_8GRBG[];
+    static const char QC_PIXEL_FORMAT_BAYER_MIPI_RAW_8RGGB[];
+    static const char QC_PIXEL_FORMAT_BAYER_MIPI_RAW_8BGGR[];
+    static const char QC_PIXEL_FORMAT_BAYER_MIPI_RAW_10GBRG[];
+    static const char QC_PIXEL_FORMAT_BAYER_MIPI_RAW_10GRBG[];
+    static const char QC_PIXEL_FORMAT_BAYER_MIPI_RAW_10RGGB[];
+    static const char QC_PIXEL_FORMAT_BAYER_MIPI_RAW_10BGGR[];
+    static const char QC_PIXEL_FORMAT_BAYER_MIPI_RAW_12GBRG[];
+    static const char QC_PIXEL_FORMAT_BAYER_MIPI_RAW_12GRBG[];
+    static const char QC_PIXEL_FORMAT_BAYER_MIPI_RAW_12RGGB[];
+    static const char QC_PIXEL_FORMAT_BAYER_MIPI_RAW_12BGGR[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_8GBRG[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_8GRBG[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_8RGGB[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_8BGGR[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_10GBRG[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_10GRBG[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_10RGGB[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_10BGGR[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_12GBRG[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_12GRBG[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_12RGGB[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_QCOM_12BGGR[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_8GBRG[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_8GRBG[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_8RGGB[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_8BGGR[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_10GBRG[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_10GRBG[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_10RGGB[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_10BGGR[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_12GBRG[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_12GRBG[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_12RGGB[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_MIPI_12BGGR[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN8_8GBRG[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN8_8GRBG[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN8_8RGGB[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN8_8BGGR[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_8GBRG[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_8GRBG[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_8RGGB[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_8BGGR[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_10GBRG[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_10GRBG[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_10RGGB[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_10BGGR[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_12GBRG[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_12GRBG[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_12RGGB[];
+    static const char QC_PIXEL_FORMAT_BAYER_IDEAL_PLAIN16_12BGGR[];
+
+    // ISO values
+    static const char ISO_AUTO[];
+    static const char ISO_HJR[];
+    static const char ISO_100[];
+    static const char ISO_200[];
+    static const char ISO_400[];
+    static const char ISO_800[];
+    static const char ISO_1600[];
+    static const char ISO_3200[];
+    static const char ISO_MANUAL[];
+
+    // Values for auto exposure settings.
+    static const char AUTO_EXPOSURE_FRAME_AVG[];
+    static const char AUTO_EXPOSURE_CENTER_WEIGHTED[];
+    static const char AUTO_EXPOSURE_SPOT_METERING[];
+    static const char AUTO_EXPOSURE_SMART_METERING[];
+    static const char AUTO_EXPOSURE_USER_METERING[];
+    static const char AUTO_EXPOSURE_SPOT_METERING_ADV[];
+    static const char AUTO_EXPOSURE_CENTER_WEIGHTED_ADV[];
+
+    static const char KEY_QC_SHARPNESS[];
+    static const char KEY_QC_MIN_SHARPNESS[];
+    static const char KEY_QC_MAX_SHARPNESS[];
+    static const char KEY_QC_SHARPNESS_STEP[];
+    static const char KEY_QC_CONTRAST[];
+    static const char KEY_QC_MIN_CONTRAST[];
+    static const char KEY_QC_MAX_CONTRAST[];
+    static const char KEY_QC_CONTRAST_STEP[];
+    static const char KEY_QC_SATURATION[];
+    static const char KEY_QC_MIN_SATURATION[];
+    static const char KEY_QC_MAX_SATURATION[];
+    static const char KEY_QC_SATURATION_STEP[];
+    static const char KEY_QC_BRIGHTNESS[];
+    static const char KEY_QC_MIN_BRIGHTNESS[];
+    static const char KEY_QC_MAX_BRIGHTNESS[];
+    static const char KEY_QC_BRIGHTNESS_STEP[];
+    static const char KEY_QC_SCE_FACTOR[];
+    static const char KEY_QC_MIN_SCE_FACTOR[];
+    static const char KEY_QC_MAX_SCE_FACTOR[];
+    static const char KEY_QC_SCE_FACTOR_STEP[];
+
+    static const char KEY_QC_HISTOGRAM[] ;
+    static const char KEY_QC_SUPPORTED_HISTOGRAM_MODES[] ;
+    static const char KEY_QC_SUPPORTED_HDR_NEED_1X[];
+    static const char KEY_QC_HDR_NEED_1X[];
+    static const char KEY_QC_VIDEO_HDR[];
+    static const char KEY_QC_VT_ENABLE[];
+    static const char KEY_QC_SUPPORTED_VIDEO_HDR_MODES[];
+    static const char KEY_QC_SENSOR_HDR[];
+    static const char KEY_QC_SUPPORTED_SENSOR_HDR_MODES[];
+    static const char KEY_QC_RDI_MODE[];
+    static const char KEY_QC_SUPPORTED_RDI_MODES[];
+    static const char KEY_QC_SECURE_MODE[];
+    static const char KEY_QC_SUPPORTED_SECURE_MODES[];
+
+    // Values for SKIN TONE ENHANCEMENT
+    static const char SKIN_TONE_ENHANCEMENT_ENABLE[];
+    static const char SKIN_TONE_ENHANCEMENT_DISABLE[];
+
+    // Values for Denoise
+    static const char DENOISE_OFF[];
+    static const char DENOISE_ON[];
+
+    // Values for focus algorithm settings.
+    static const char FOCUS_ALGO_AUTO[];
+    static const char FOCUS_ALGO_SPOT_METERING[];
+    static const char FOCUS_ALGO_CENTER_WEIGHTED[];
+    static const char FOCUS_ALGO_FRAME_AVERAGE[];
+
+    // Values for AE Bracketing settings.
+    static const char AE_BRACKET_OFF[];
+    static const char AE_BRACKET[];
+
+    // Values for AF Bracketing settings.
+    static const char AF_BRACKET_OFF[];
+    static const char AF_BRACKET_ON[];
+
+    // Values for Refocus settings.
+    static const char RE_FOCUS_OFF[];
+    static const char RE_FOCUS_ON[];
+
+    // Values for Chroma Flash settings.
+    static const char CHROMA_FLASH_OFF[];
+    static const char CHROMA_FLASH_ON[];
+
+    // Values for Opti Zoom settings.
+    static const char OPTI_ZOOM_OFF[];
+    static const char OPTI_ZOOM_ON[];
+
+    // Values for Still More settings.
+    static const char STILL_MORE_OFF[];
+    static const char STILL_MORE_ON[];
+
+    // Values for HDR mode settings.
+    static const char HDR_MODE_SENSOR[];
+    static const char HDR_MODE_MULTI_FRAME[];
+
+    // Values for True Portrait settings.
+    static const char TRUE_PORTRAIT_OFF[];
+    static const char TRUE_PORTRAIT_ON[];
+
+    // Values for HFR settings.
+    static const char VIDEO_HFR_OFF[];
+    static const char VIDEO_HFR_2X[];
+    static const char VIDEO_HFR_3X[];
+    static const char VIDEO_HFR_4X[];
+    static const char VIDEO_HFR_5X[];
+    static const char VIDEO_HFR_6X[];
+    static const char VIDEO_HFR_7X[];
+    static const char VIDEO_HFR_8X[];
+    static const char VIDEO_HFR_9X[];
+
+    // Values for feature on/off settings.
+    static const char VALUE_OFF[];
+    static const char VALUE_ON[];
+
+    // Values for feature enable/disable settings.
+    static const char VALUE_ENABLE[];
+    static const char VALUE_DISABLE[];
+
+    // Values for feature true/false settings.
+    static const char VALUE_FALSE[];
+    static const char VALUE_TRUE[];
+
+    //Values for flip settings
+    static const char FLIP_MODE_OFF[];
+    static const char FLIP_MODE_V[];
+    static const char FLIP_MODE_H[];
+    static const char FLIP_MODE_VH[];
+
+    //Values for CDS Mode
+    static const char CDS_MODE_OFF[];
+    static const char CDS_MODE_ON[];
+    static const char CDS_MODE_AUTO[];
+
+    static const char KEY_SELECTED_AUTO_SCENE[];
+
+    // Values for Video rotation
+    static const char VIDEO_ROTATION_0[];
+    static const char VIDEO_ROTATION_90[];
+    static const char VIDEO_ROTATION_180[];
+    static const char VIDEO_ROTATION_270[];
+
+    enum {
+        CAMERA_ORIENTATION_UNKNOWN = 0,
+        CAMERA_ORIENTATION_PORTRAIT = 1,
+        CAMERA_ORIENTATION_LANDSCAPE = 2,
+    };
+
+    template <typename valueType> struct QCameraMap {
+        const char *const desc;
+        valueType val;
+    };
+
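+    // Each *_MAP[] table declared further below is an array of these
+    // entries, tying a user-visible parameter string (desc) to its backend
+    // enum value (val), e.g. ISO_100 to the matching cam_iso_mode_type,
+    // so parameter strings can be translated to enums. (Illustrative note;
+    // the table contents themselves are defined in the corresponding .cpp.)
+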
+    friend class QCameraReprocScaleParam;
+    QCameraReprocScaleParam m_reprocScaleParam;
+
+    void getSupportedHfrSizes(Vector<Size> &sizes);
+    void setPreviewFrameRateMode(const char *mode);
+    const char *getPreviewFrameRateMode() const;
+    void setTouchIndexAec(int x, int y);
+    void getTouchIndexAec(int *x, int *y);
+    void setTouchIndexAf(int x, int y);
+    void getTouchIndexAf(int *x, int *y);
+
+    int32_t init(cam_capability_t *, mm_camera_vtbl_t *, QCameraAdjustFPS *);
+    void deinit();
+    int32_t assign(QCameraParameters& params);
+    int32_t initDefaultParameters();
+    int32_t updateParameters(QCameraParameters&, bool &needRestart);
+    int32_t commitParameters();
+    int getPreviewHalPixelFormat() const;
+    int32_t getStreamRotation(cam_stream_type_t streamType,
+                               cam_pp_feature_config_t &featureConfig,
+                               cam_dimension_t &dim);
+    int32_t getStreamFormat(cam_stream_type_t streamType,
+                             cam_format_t &format);
+    int32_t getStreamDimension(cam_stream_type_t streamType,
+                                cam_dimension_t &dim);
+    void getThumbnailSize(int *width, int *height) const;
+
+    uint8_t getZSLBurstInterval();
+    uint8_t getZSLQueueDepth();
+    uint8_t getZSLBackLookCount();
+    uint8_t getMaxUnmatchedFramesInQueue();
+    bool isZSLMode() {return m_bZslMode;};
+    bool isRdiMode() {return m_bRdiMode;};
+    bool isSecureMode() {return m_bSecureMode;};
+    bool isNoDisplayMode() {return m_bNoDisplayMode;};
+    bool isWNREnabled() {return m_bWNROn;};
+    bool isTNRPreviewEnabled() {return m_bTNRPreviewOn;};
+    bool isTNRVideoEnabled() {return m_bTNRVideoOn;};
+    bool isHfrMode() {return m_bHfrMode;};
+    void getHfrFps(cam_fps_range_t &pFpsRange) { pFpsRange = m_hfrFpsRange;};
+    uint8_t getNumOfSnapshots();
+    uint8_t getNumOfRetroSnapshots();
+    uint8_t getNumOfExtraHDRInBufsIfNeeded();
+    uint8_t getNumOfExtraHDROutBufsIfNeeded();
+    uint8_t getBurstNum();
+    int getBurstLEDOnPeriod();
+    int getRetroActiveBurstNum();
+    bool getRecordingHintValue() {return m_bRecordingHint;}; // return local copy of video hint
+    int setRecordingHintValue(int32_t value); // set local copy of video hint and send to server
+                                              // no change in parameters value
+    uint32_t getJpegQuality();
+    uint32_t getRotation();
+    uint32_t getDeviceRotation();
+    uint32_t getJpegExifRotation();
+    bool useJpegExifRotation();
+    int32_t getEffectValue();
+
+    int32_t getExifDateTime(String8 &dateTime, String8 &subsecTime);
+    int32_t getExifFocalLength(rat_t *focalLenght);
+    uint16_t getExifIsoSpeed();
+    int32_t getExifGpsProcessingMethod(char *gpsProcessingMethod, uint32_t &count);
+    int32_t getExifLatitude(rat_t *latitude, char *latRef);
+    int32_t getExifLongitude(rat_t *longitude, char *lonRef);
+    int32_t getExifAltitude(rat_t *altitude, char *altRef);
+    int32_t getExifGpsDateTimeStamp(char *gpsDateStamp, uint32_t bufLen, rat_t *gpsTimeStamp);
+    int32_t updateFocusDistances(cam_focus_distances_info_t *focusDistances);
+
+    bool isAEBracketEnabled();
+    int32_t setAEBracketing();
+    bool isFpsDebugEnabled() {return m_bDebugFps;};
+    bool isHistogramEnabled() {return m_bHistogramEnabled;};
+    bool isSceneSelectionEnabled() {return m_bSceneSelection;};
+    int32_t setSelectedScene(cam_scene_mode_type scene);
+    cam_scene_mode_type getSelectedScene();
+    bool isFaceDetectionEnabled() {return ((m_nFaceProcMask & CAM_FACE_PROCESS_MASK_DETECTION) != 0);};
+    bool getFaceDetectionOption() { return  m_bFaceDetectionOn;}
+    int32_t setFaceDetectionOption(bool enabled);
+    int32_t setHistogram(bool enabled);
+    int32_t setFaceDetection(bool enabled, bool initCommit);
+    int32_t setFrameSkip(enum msm_vfe_frame_skip_pattern pattern);
+    qcamera_thermal_mode getThermalMode() {return m_ThermalMode;};
+    int32_t updateRecordingHintValue(int32_t value);
+    int32_t setHDRAEBracket(cam_exp_bracketing_t hdrBracket);
+    bool isHDREnabled();
+    bool isAutoHDREnabled();
+    int32_t stopAEBracket();
+    int32_t updateFlash(bool commitSettings);
+    int32_t updateRAW(cam_dimension_t max_dim);
+    bool isAVTimerEnabled();
+    bool isDISEnabled();
+    cam_is_type_t getISType();
+    uint8_t getMobicatMask();
+
+    cam_focus_mode_type getFocusMode() const {return mFocusMode;};
+    int32_t setNumOfSnapshot();
+    int32_t adjustPreviewFpsRange(cam_fps_range_t *fpsRange);
+    bool isJpegPictureFormat() {return (mPictureFormat == CAM_FORMAT_JPEG);};
+    bool isNV16PictureFormat() {return (mPictureFormat == CAM_FORMAT_YUV_422_NV16);};
+    bool isNV21PictureFormat() {return (mPictureFormat == CAM_FORMAT_YUV_420_NV21);};
+    cam_denoise_process_type_t getDenoiseProcessPlate(cam_intf_parm_type_t type);
+    void getLiveSnapshotSize(cam_dimension_t &dim);
+    int32_t getRawSize(cam_dimension_t &dim) {dim = m_rawSize; return NO_ERROR;};
+    int32_t setRawSize(cam_dimension_t &dim);
+    int getFlipMode(cam_stream_type_t streamType);
+    bool isSnapshotFDNeeded();
+
+    bool isHDR1xFrameEnabled() {return m_bHDR1xFrameEnabled;}
+    bool isYUVFrameInfoNeeded();
+    const char*getFrameFmtString(cam_format_t fmt);
+    bool isHDR1xExtraBufferNeeded() {return m_bHDR1xExtraBufferNeeded;}
+    bool isHDROutputCropEnabled() {return m_bHDROutputCropEnabled;}
+
+    bool isPreviewFlipChanged() { return m_bPreviewFlipChanged; };
+    bool isVideoFlipChanged() { return m_bVideoFlipChanged; };
+    bool isSnapshotFlipChanged() { return m_bSnapshotFlipChanged; };
+    void setHDRSceneEnable(bool bflag);
+    int32_t updateAWBParams(cam_awb_params_t &awb_params);
+
+    const char *getASDStateString(cam_auto_scene_t scene);
+    bool isHDRThumbnailProcessNeeded() { return m_bHDRThumbnailProcessNeeded; };
+    int getAutoFlickerMode();
+    void setMinPpMask(uint32_t min_pp_mask) { m_nMinRequiredPpMask = min_pp_mask; };
+    bool sendStreamConfigInfo(cam_stream_size_info_t &stream_config_info);
+    bool setStreamConfigure(bool isCapture, bool previewAsPostview, bool resetConfig);
+    int32_t addOnlineRotation(uint32_t rotation, uint32_t streamId, int32_t device_rotation);
+    uint8_t getNumOfExtraBuffersForImageProc();
+    uint8_t getNumOfExtraBuffersForVideo();
+    uint8_t getNumOfExtraBuffersForPreview();
+    uint32_t getExifBufIndex(uint32_t captureIndex);
+    bool needThumbnailReprocess(uint32_t *pFeatureMask);
+    inline bool isUbiFocusEnabled() {return m_bAFBracketingOn && !m_bReFocusOn;};
+    inline bool isChromaFlashEnabled() {return m_bChromaFlashOn;};
+    inline bool isTruePortraitEnabled() {return m_bTruePortraitOn;};
+    inline size_t getTPMaxMetaSize() {
+        return m_pCapability->true_portrait_settings_need.meta_max_size;};
+    inline bool isSeeMoreEnabled() {return m_bSeeMoreOn;};
+    inline bool isStillMoreEnabled() {return m_bStillMoreOn;};
+    bool isOptiZoomEnabled();
+    int32_t commitAFBracket(cam_af_bracketing_t afBracket);
+    int32_t commitFlashBracket(cam_flash_bracketing_t flashBracket);
+    int32_t set3ALock(const char *lockStr);
+    int32_t setAndCommitZoom(int zoom_level);
+    uint8_t getBurstCountForAdvancedCapture();
+    uint32_t getNumberInBufsForSingleShot();
+    uint32_t getNumberOutBufsForSingleShot();
+    int32_t setLongshotEnable(bool enable);
+    String8 dump();
+    inline bool isUbiRefocus() {return m_bReFocusOn &&
+            (m_pCapability->refocus_af_bracketing_need.output_count > 1);};
+    inline uint32_t getRefocusMaxMetaSize() {
+            return m_pCapability->refocus_af_bracketing_need.meta_max_size;};
+    inline uint8_t getRefocusOutputCount() {
+            return m_pCapability->refocus_af_bracketing_need.output_count;};
+    inline bool generateThumbFromMain() {return isUbiFocusEnabled() ||
+            isChromaFlashEnabled() || isOptiZoomEnabled() || isUbiRefocus()
+            || isHDREnabled() || isStillMoreEnabled() || isTruePortraitEnabled(); }
+    void updateCurrentFocusPosition(cam_focus_pos_info_t &cur_pos_info);
+    void updateAEInfo(cam_3a_params_t &ae_params);
+    bool isDisplayFrameNeeded() { return m_bDisplayFrame; };
+    int32_t setDisplayFrame(bool enabled) {m_bDisplayFrame=enabled; return 0;};
+    bool isAdvCamFeaturesEnabled() {return isUbiFocusEnabled() ||
+            isChromaFlashEnabled() || m_bOptiZoomOn || isHDREnabled() ||
+            isStillMoreEnabled();}
+    int32_t setAecLock(const char *aecStr);
+    int32_t updateDebugLevel();
+    bool is4k2kVideoResolution();
+    int getBrightness();
+    int32_t updateOisValue(bool oisValue);
+    int32_t setIntEvent(cam_int_evt_params_t params);
+    void setOfflineRAW();
+    bool getofflineRAW() {return mOfflineRAW;}
+    int32_t updatePpFeatureMask(cam_stream_type_t stream_type);
+    int32_t setStreamPpMask(cam_stream_type_t stream_type, uint32_t pp_mask);
+    int32_t getStreamPpMask(cam_stream_type_t stream_type, uint32_t &pp_mask);
+    int32_t getSharpness() {return m_nSharpness;};
+    int32_t getEffect() {return mParmEffect;};
+    int32_t updateFlashMode(cam_flash_mode_t flash_mode);
+    int32_t configureFlash(cam_capture_frame_config_t &frame_config);
+    int32_t configureAEBracketing(cam_capture_frame_config_t &frame_config);
+    int32_t configureHDRBracketing(cam_capture_frame_config_t &frame_config);
+    int32_t configFrameCapture(bool commitSettings);
+    int32_t resetFrameCapture(bool commitSettings);
+    cam_still_more_t getStillMoreSettings() {return m_stillmore_config;};
+    void setStillMoreSettings(cam_still_more_t stillmore_config)
+            {m_stillmore_config = stillmore_config;};
+    cam_still_more_t getStillMoreCapability()
+            {return m_pCapability->stillmore_settings_need;};
+
+    int32_t getZoomLevel(){return mZoomLevel;};
+    int32_t getParmZoomLevel(){return mParmZoomLevel;};
+    int8_t  getReprocCount(){return mTotalPPCount;};
+    int8_t  getCurPPCount(){return mCurPPCount;};
+    void    setReprocCount();
+    void    setCurPPCount(int8_t count) {mCurPPCount = count;};
+    int32_t  updateCurrentFocusPosition(int32_t pos);
+    int32_t setToneMapMode(uint32_t value, bool initCommit);
+    void setTintless(bool enable);
+    void setBufBatchCount(int8_t buf_cnt);
+    int8_t  getBufBatchCount() {return mBufBatchCnt;};
+
+    cam_capture_frame_config_t getCaptureFrameConfig()
+            { return m_captureFrameConfig; };
+    void setJpegRotation(int rotation);
+    uint32_t getJpegRotation() { return mJpegRotation;};
+
+private:
+    int32_t setPreviewSize(const QCameraParameters& );
+    int32_t setVideoSize(const QCameraParameters& );
+    int32_t setPictureSize(const QCameraParameters& );
+    int32_t setLiveSnapshotSize(const QCameraParameters& );
+    int32_t setPreviewFormat(const QCameraParameters& );
+    int32_t setPictureFormat(const QCameraParameters& );
+    int32_t setOrientation(const QCameraParameters& );
+    int32_t setJpegThumbnailSize(const QCameraParameters& );
+    int32_t setJpegQuality(const QCameraParameters& );
+    int32_t setPreviewFpsRange(const QCameraParameters& );
+    int32_t setPreviewFrameRate(const QCameraParameters& );
+    int32_t setAutoExposure(const QCameraParameters& );
+    int32_t setEffect(const QCameraParameters& );
+    int32_t setBrightness(const QCameraParameters& );
+    int32_t setFocusMode(const QCameraParameters& );
+    int32_t setFocusPosition(const QCameraParameters& );
+    int32_t setSharpness(const QCameraParameters& );
+    int32_t setSaturation(const QCameraParameters& );
+    int32_t setContrast(const QCameraParameters& );
+    int32_t setSkinToneEnhancement(const QCameraParameters& );
+    int32_t setSceneDetect(const QCameraParameters& );
+    int32_t setVideoHDR(const QCameraParameters& );
+    int32_t setVtEnable(const QCameraParameters& );
+    int32_t setZoom(const QCameraParameters& );
+    int32_t setISOValue(const QCameraParameters& );
+    int32_t setContinuousISO(const QCameraParameters& );
+    int32_t setExposureTime(const QCameraParameters& );
+    int32_t setRotation(const QCameraParameters& );
+    int32_t setVideoRotation(const QCameraParameters& );
+    int32_t setFlash(const QCameraParameters& );
+    int32_t setAecLock(const QCameraParameters& );
+    int32_t setAwbLock(const QCameraParameters& );
+    int32_t setMCEValue(const QCameraParameters& );
+    int32_t setDISValue(const QCameraParameters& params);
+    int32_t setLensShadeValue(const QCameraParameters& );
+    int32_t setExposureCompensation(const QCameraParameters& );
+    int32_t setWhiteBalance(const QCameraParameters& );
+    int32_t setManualWhiteBalance(const QCameraParameters& );
+    int32_t setAntibanding(const QCameraParameters& );
+    int32_t setFocusAreas(const QCameraParameters& );
+    int32_t setMeteringAreas(const QCameraParameters& );
+    int32_t setSceneMode(const QCameraParameters& );
+    int32_t setSelectableZoneAf(const QCameraParameters& );
+    int32_t setAEBracket(const QCameraParameters& );
+    int32_t setAFBracket(const QCameraParameters& );
+    int32_t setReFocus(const QCameraParameters& );
+    int32_t setChromaFlash(const QCameraParameters& );
+    int32_t setOptiZoom(const QCameraParameters& );
+    int32_t setHDRMode(const QCameraParameters& );
+    int32_t setHDRNeed1x(const QCameraParameters& );
+    int32_t setTruePortrait(const QCameraParameters& );
+    int32_t setSeeMore(const QCameraParameters& );
+    int32_t setStillMore(const QCameraParameters& );
+    int32_t setRedeyeReduction(const QCameraParameters& );
+    int32_t setGpsLocation(const QCameraParameters& );
+    int32_t setRecordingHint(const QCameraParameters& );
+    int32_t setNoDisplayMode(const QCameraParameters& );
+    int32_t setWaveletDenoise(const QCameraParameters& );
+    int32_t setTemporalDenoise(const QCameraParameters&);
+    int32_t setZslMode(const QCameraParameters& );
+    int32_t setZslAttributes(const QCameraParameters& );
+    int32_t setAutoHDR(const QCameraParameters& params);
+    int32_t setCameraMode(const QCameraParameters& );
+    int32_t setSceneSelectionMode(const QCameraParameters& params);
+    int32_t setFaceRecognition(const QCameraParameters& );
+    int32_t setFlip(const QCameraParameters& );
+    int32_t setBurstNum(const QCameraParameters& params);
+    int32_t setRetroActiveBurstNum(const QCameraParameters& params);
+    int32_t setBurstLEDOnPeriod(const QCameraParameters& params);
+    int32_t setSnapshotFDReq(const QCameraParameters& );
+    int32_t setStatsDebugMask();
+    int32_t setPAAF();
+    int32_t setTintlessValue(const QCameraParameters& params);
+    int32_t setCDSMode(const QCameraParameters& params);
+    int32_t setMobicat(const QCameraParameters& params);
+    int32_t setRdiMode(const QCameraParameters& );
+    int32_t setSecureMode(const QCameraParameters& );
+    int32_t setAutoExposure(const char *autoExp);
+    int32_t setPreviewFpsRange(int min_fps,int max_fps,
+            int vid_min_fps,int vid_max_fps);
+    int32_t setEffect(const char *effect);
+    int32_t setBrightness(int brightness);
+    int32_t setFocusMode(const char *focusMode);
+    int32_t setFocusPosition(const char *typeStr, const char *posStr);
+    int32_t setSharpness(int sharpness);
+    int32_t setSaturation(int saturation);
+    int32_t setContrast(int contrast);
+    int32_t setSkinToneEnhancement(int sceFactor);
+    int32_t setSceneDetect(const char *scendDetect);
+    int32_t setVideoHDR(const char *videoHDR);
+    int32_t setSensorSnapshotHDR(const char *snapshotHDR);
+    int32_t setVtEnable(const char *vtEnable);
+    int32_t setZoom(int zoom_level);
+    int32_t setISOValue(const char *isoValue);
+    int32_t setContinuousISO(const char *isoValue);
+    int32_t setExposureTime(const char *expTimeStr);
+    int32_t setFlash(const char *flashStr);
+    int32_t setAwbLock(const char *awbStr);
+    int32_t setMCEValue(const char *mceStr);
+    int32_t setDISValue(const char *disStr);
+    int32_t setHighFrameRate(const int32_t hfrMode);
+    int32_t setLensShadeValue(const char *lensShadeStr);
+    int32_t setExposureCompensation(int expComp);
+    int32_t setWhiteBalance(const char *wbStr);
+    int32_t setWBManualCCT(const char *cctStr);
+    int32_t setManualWBGains(const char *gainStr);
+    int32_t setAntibanding(const char *antiBandingStr);
+    int32_t setFocusAreas(const char *focusAreasStr);
+    int32_t setMeteringAreas(const char *meteringAreasStr);
+    int32_t setSceneMode(const char *sceneModeStr);
+    int32_t setSelectableZoneAf(const char *selZoneAFStr);
+    int32_t setAEBracket(const char *aecBracketStr);
+    int32_t setAFBracket(const char *afBracketStr);
+    int32_t setReFocus(const char *reFocusStr);
+    int32_t setChromaFlash(const char *chromaFlashStr);
+    int32_t setOptiZoom(const char *optiZoomStr);
+    int32_t setHDRMode(const char *hdrModeStr);
+    int32_t setHDRNeed1x(const char *hdrNeed1xStr);
+    int32_t setTruePortrait(const char *truePortraitStr);
+    int32_t setSeeMore(const char *SeeMoreStr);
+    int32_t setStillMore(const char *StillMoreStr);
+    int32_t setRedeyeReduction(const char *redeyeStr);
+    int32_t setWaveletDenoise(const char *wnrStr);
+    int32_t setFaceRecognition(const char *faceRecog, uint32_t maxFaces);
+    int32_t setTintlessValue(const char *tintStr);
+    bool UpdateHFRFrameRate(const QCameraParameters& params);
+    int32_t setRdiMode(const char *str);
+    int32_t setSecureMode(const char *str);
+    int32_t setCDSMode(int32_t cds_mode, bool initCommit);
+
+    int32_t parseGains(const char *gainStr, float &r_gain,
+            float &g_gain, float &b_gain);
+    int32_t parse_pair(const char *str, int *first, int *second,
+                       char delim, char **endptr);
+    void parseSizesList(const char *sizesStr, Vector<Size> &sizes);
+    int32_t parseNDimVector(const char *str, int *num, int N, char delim);
+    int32_t parseCameraAreaString(const char *str, int max_num_areas,
+                                  cam_area_t *pAreas, int& num_areas_found);
+    bool validateCameraAreas(cam_area_t *areas, int num_areas);
+    int parseGPSCoordinate(const char *coord_str, rat_t *coord);
+    int32_t getRational(rat_t *rat, int num, int denom);
+    String8 createSizesString(const cam_dimension_t *sizes, size_t len);
+    String8 createHfrValuesString(const cam_hfr_info_t *values, size_t len,
+            const QCameraMap<cam_hfr_mode_t> *map, size_t map_len);
+    String8 createHfrSizesString(const cam_hfr_info_t *values, size_t len);
+    String8 createFpsRangeString(const cam_fps_range_t *fps,
+            size_t len, int &default_fps_index);
+    String8 createFpsString(cam_fps_range_t &fps);
+    String8 createZoomRatioValuesString(uint32_t *zoomRatios, size_t length);
+
+    // ops for batch set/get params with server
+    int32_t initBatchUpdate(parm_buffer_t *p_table);
+    int32_t commitSetBatch();
+    int32_t commitGetBatch();
+
+    // ops to temporarily update parameter entries and commit
+    int32_t updateParamEntry(const char *key, const char *value);
+    int32_t commitParamChanges();
+
+    // Map from strings to values
+    static const cam_dimension_t THUMBNAIL_SIZES_MAP[];
+    static const QCameraMap<cam_auto_exposure_mode_type> AUTO_EXPOSURE_MAP[];
+    static const QCameraMap<cam_format_t> PREVIEW_FORMATS_MAP[];
+    static const QCameraMap<cam_format_t> PICTURE_TYPES_MAP[];
+    static const QCameraMap<cam_focus_mode_type> FOCUS_MODES_MAP[];
+    static const QCameraMap<cam_effect_mode_type> EFFECT_MODES_MAP[];
+    static const QCameraMap<cam_scene_mode_type> SCENE_MODES_MAP[];
+    static const QCameraMap<cam_flash_mode_t> FLASH_MODES_MAP[];
+    static const QCameraMap<cam_focus_algorithm_type> FOCUS_ALGO_MAP[];
+    static const QCameraMap<cam_wb_mode_type> WHITE_BALANCE_MODES_MAP[];
+    static const QCameraMap<cam_antibanding_mode_type> ANTIBANDING_MODES_MAP[];
+    static const QCameraMap<cam_iso_mode_type> ISO_MODES_MAP[];
+    static const QCameraMap<cam_hfr_mode_t> HFR_MODES_MAP[];
+    static const QCameraMap<cam_bracket_mode> BRACKETING_MODES_MAP[];
+    static const QCameraMap<int> ON_OFF_MODES_MAP[];
+    static const QCameraMap<int> ENABLE_DISABLE_MODES_MAP[];
+    static const QCameraMap<int> DENOISE_ON_OFF_MODES_MAP[];
+    static const QCameraMap<int> TRUE_FALSE_MODES_MAP[];
+    static const QCameraMap<int> TOUCH_AF_AEC_MODES_MAP[];
+    static const QCameraMap<cam_flip_t> FLIP_MODES_MAP[];
+    static const QCameraMap<int> AF_BRACKETING_MODES_MAP[];
+    static const QCameraMap<int> RE_FOCUS_MODES_MAP[];
+    static const QCameraMap<int> CHROMA_FLASH_MODES_MAP[];
+    static const QCameraMap<int> OPTI_ZOOM_MODES_MAP[];
+    static const QCameraMap<int> TRUE_PORTRAIT_MODES_MAP[];
+    static const QCameraMap<cam_cds_mode_type_t> CDS_MODES_MAP[];
+    static const QCameraMap<int> HDR_MODES_MAP[];
+    static const QCameraMap<int> VIDEO_ROTATION_MODES_MAP[];
+    static const QCameraMap<int> SEE_MORE_MODES_MAP[];
+    static const QCameraMap<int> STILL_MORE_MODES_MAP[];
+
+    cam_capability_t *m_pCapability;
+    mm_camera_vtbl_t *m_pCamOpsTbl;
+    QCameraHeapMemory *m_pParamHeap;
+    parm_buffer_t     *m_pParamBuf;  // ptr to param buf in m_pParamHeap
+    cam_is_type_t mIsType;
+
+    bool m_bZslMode;                // if ZSL is enabled
+    bool m_bZslMode_new;
+    bool m_bForceZslMode;
+    bool m_bRecordingHint;          // local copy of recording hint
+    bool m_bRecordingHint_new;
+    bool m_bHistogramEnabled;       // if histogram is enabled
+    uint32_t m_nFaceProcMask;       // face process mask
+    bool m_bFaceDetectionOn;        //  if face Detection turned on by user
+    bool m_bDebugFps;               // if FPS need to be logged
+    cam_focus_mode_type mFocusMode;
+    cam_format_t mPreviewFormat;
+    int32_t mPictureFormat;         // could be CAMERA_PICTURE_TYPE_JPEG or cam_format_t
+    bool m_bNeedRestart;            // if preview needs restart after parameters updated
+    bool m_bNoDisplayMode;
+    bool m_bWNROn;
+    bool m_bTNRPreviewOn;
+    bool m_bTNRVideoOn;
+    bool m_bInited;
+    uint8_t m_nBurstNum;
+    int m_nRetroBurstNum;
+    int m_nBurstLEDOnPeriod;
+    cam_exp_bracketing_t m_AEBracketingClient;
+    bool m_bUpdateEffects;          // Cause reapplying of effects
+    bool m_bSceneTransitionAuto;    // Indicate that scene has changed to Auto
+    bool m_bPreviewFlipChanged;        // if flip setting for preview changed
+    bool m_bVideoFlipChanged;          // if flip setting for video changed
+    bool m_bSnapshotFlipChanged;       // if flip setting for snapshot changed
+    bool m_bFixedFrameRateSet;      // Indicates that a fixed frame rate is set
+    qcamera_thermal_mode m_ThermalMode; // adjust fps vs adjust frameskip
+    cam_dimension_t m_LiveSnapshotSize; // live snapshot size
+    cam_dimension_t m_rawSize; // raw capture size
+    bool m_bHDREnabled;             // if HDR is enabled
+    bool m_bAVTimerEnabled;    //if AVTimer is enabled
+    bool m_bDISEnabled;
+    bool m_bOISEnabled;
+    cam_still_more_t m_stillmore_config;
+
+    uint8_t m_MobiMask;
+    QCameraAdjustFPS *m_AdjustFPS;
+    bool m_bHDR1xFrameEnabled;          // if frame with exposure compensation 0 during HDR is enabled
+    bool m_HDRSceneEnabled; // Auto HDR indication
+    bool m_bHDRThumbnailProcessNeeded;        // if thumbnail needs to be processed for HDR
+    bool m_bHDR1xExtraBufferNeeded;     // if extra frame with exposure compensation 0 during HDR is needed
+    bool m_bHDROutputCropEnabled;     // if HDR output frame needs to be scaled to user resolution
+    DefaultKeyedVector<String8,String8> m_tempMap; // map to temporarily store parameters to be set
+    cam_fps_range_t m_default_fps_range;
+    bool m_bAFBracketingOn;
+    bool m_bReFocusOn;
+    bool m_bChromaFlashOn;
+    bool m_bOptiZoomOn;
+    bool m_bSceneSelection;
+    Mutex m_SceneSelectLock;
+    cam_scene_mode_type m_SelectedScene;
+    bool m_bSeeMoreOn;
+    bool m_bStillMoreOn;
+    cam_fps_range_t m_hfrFpsRange;
+    bool m_bHfrMode;
+    bool m_bSensorHDREnabled;             // if sensor HDR is enabled
+    bool m_bRdiMode;                // if RDI mode
+    bool m_bUbiRefocus;
+    bool m_bDisplayFrame;
+    bool m_bSecureMode;
+    bool m_bAeBracketingEnabled;
+    int32_t mFlashValue;
+    int32_t mFlashDaemonValue;
+    int32_t mHfrMode;
+    bool m_bHDRModeSensor;
+    bool mOfflineRAW;
+    bool m_bTruePortraitOn;
+    uint32_t m_nMinRequiredPpMask;
+    uint32_t mStreamPpMask[CAM_STREAM_TYPE_MAX];
+    int32_t m_nSharpness;
+    int8_t mTotalPPCount;
+    int8_t mCurPPCount;
+    int32_t mZoomLevel;
+    bool m_bStreamsConfigured;
+    int32_t mParmZoomLevel;
+    int32_t mCds_mode;
+    int32_t mParmEffect;
+
+    cam_capture_frame_config_t m_captureFrameConfig;
+    int8_t mBufBatchCnt;
+
+    uint32_t mRotation;
+    uint32_t mJpegRotation;
+};
+
+}; // namespace qcamera
+
+#endif
diff --git a/camera/QCamera2/HAL/QCameraPostProc.cpp b/camera/QCamera2/HAL/QCameraPostProc.cpp
new file mode 100644
index 0000000..b531586
--- /dev/null
+++ b/camera/QCamera2/HAL/QCameraPostProc.cpp
@@ -0,0 +1,3213 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+#define LOG_TAG "QCameraPostProc"
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <utils/Errors.h>
+#include <utils/Trace.h>
+
+#include "QCamera2HWI.h"
+#include "QCameraPostProc.h"
+
+namespace qcamera {
+
+const char *QCameraPostProcessor::STORE_LOCATION = "/sdcard/img_%d.jpg";
+
+#define FREE_JPEG_OUTPUT_BUFFER(ptr,cnt)     \
+    int jpeg_bufs; \
+    for (jpeg_bufs = 0; jpeg_bufs < (int)cnt; jpeg_bufs++)  { \
+      if (ptr[jpeg_bufs] != NULL) { \
+          free(ptr[jpeg_bufs]); \
+          ptr[jpeg_bufs] = NULL; \
+      } \
+    }
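+// Note: this macro declares its loop index (jpeg_bufs) directly in the
+// enclosing scope rather than wrapping the body in do { } while (0), so it
+// is intended to be used at most once per scope, as in the destructor and
+// the on_error path of getJpegEncodingConfig() below.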
+
+/*===========================================================================
+ * FUNCTION   : QCameraPostProcessor
+ *
+ * DESCRIPTION: constructor of QCameraPostProcessor.
+ *
+ * PARAMETERS :
+ *   @cam_ctrl : ptr to HWI object
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraPostProcessor::QCameraPostProcessor(QCamera2HardwareInterface *cam_ctrl)
+    : m_parent(cam_ctrl),
+      mJpegCB(NULL),
+      mJpegUserData(NULL),
+      mJpegClientHandle(0),
+      mJpegSessionId(0),
+      m_pJpegExifObj(NULL),
+      m_bThumbnailNeeded(TRUE),
+      mTotalNumReproc(0),
+      m_bInited(FALSE),
+      m_inputPPQ(releasePPInputData, this),
+      m_ongoingPPQ(releaseOngoingPPData, this),
+      m_inputJpegQ(releaseJpegData, this),
+      m_ongoingJpegQ(releaseJpegData, this),
+      m_inputRawQ(releaseRawData, this),
+      mSaveFrmCnt(0),
+      mUseSaveProc(false),
+      mUseJpegBurst(false),
+      mJpegMemOpt(true),
+      m_JpegOutputMemCount(0),
+      mNewJpegSessionNeeded(true),
+      m_bufCountPPQ(0),
+      m_PPindex(0)
+{
+    memset(&mJpegHandle, 0, sizeof(mJpegHandle));
+    memset(&m_pJpegOutputMem, 0, sizeof(m_pJpegOutputMem));
+    memset(mPPChannels, 0, sizeof(mPPChannels));
+    m_DataMem = NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCameraPostProcessor
+ *
+ * DESCRIPTION: destructor of QCameraPostProcessor.
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraPostProcessor::~QCameraPostProcessor()
+{
+    FREE_JPEG_OUTPUT_BUFFER(m_pJpegOutputMem,m_JpegOutputMemCount);
+    if (m_pJpegExifObj != NULL) {
+        delete m_pJpegExifObj;
+        m_pJpegExifObj = NULL;
+    }
+    for (int8_t i = 0; i < mTotalNumReproc; i++) {
+        QCameraChannel *pChannel = mPPChannels[i];
+        if ( pChannel != NULL ) {
+            pChannel->stop();
+            delete pChannel;
+            pChannel = NULL;
+            m_parent->mParameters.setCurPPCount((int8_t)
+                    (m_parent->mParameters.getCurPPCount() - 1));
+        }
+    }
+    mTotalNumReproc = 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : init
+ *
+ * DESCRIPTION: initialization of postprocessor
+ *
+ * PARAMETERS :
+ *   @jpeg_cb      : callback to handle jpeg event from mm-camera-interface
+ *   @user_data    : user data ptr for jpeg callback
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraPostProcessor::init(jpeg_encode_callback_t jpeg_cb, void *user_data)
+{
+    mJpegCB = jpeg_cb;
+    mJpegUserData = user_data;
+    mm_dimension max_size;
+
+    if ((0 > m_parent->m_max_pic_width) || (0 > m_parent->m_max_pic_height)) {
+        ALOGE("%s : Negative dimension %dx%d", __func__,
+                m_parent->m_max_pic_width, m_parent->m_max_pic_height);
+        return BAD_VALUE;
+    }
+
+    //set max pic size
+    memset(&max_size, 0, sizeof(mm_dimension));
+    max_size.w = (uint32_t)m_parent->m_max_pic_width;
+    max_size.h = (uint32_t)m_parent->m_max_pic_height;
+
+    mJpegClientHandle = jpeg_open(&mJpegHandle, max_size);
+    if(!mJpegClientHandle) {
+        ALOGE("%s : jpeg_open did not work", __func__);
+        return UNKNOWN_ERROR;
+    }
+
+    m_dataProcTh.launch(dataProcessRoutine, this);
+    m_saveProcTh.launch(dataSaveRoutine, this);
+
+    m_parent->mParameters.setReprocCount();
+    m_bInited = TRUE;
+    return NO_ERROR;
+}
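+
+// Typical call sequence (a sketch inferred from this file, not a contract):
+// the owning QCamera2HardwareInterface calls init() once with a jpeg
+// callback, start() with the source channel before taking a picture, hands
+// frames to processData()/processRawData(), then stop(), and finally
+// deinit() before the object is destroyed.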
+
+/*===========================================================================
+ * FUNCTION   : deinit
+ *
+ * DESCRIPTION: de-initialization of postprocessor
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraPostProcessor::deinit()
+{
+    if (m_bInited == TRUE) {
+        m_dataProcTh.exit();
+        m_saveProcTh.exit();
+
+        if(mJpegClientHandle > 0) {
+            int rc = mJpegHandle.close(mJpegClientHandle);
+            CDBG_HIGH("%s: Jpeg closed, rc = %d, mJpegClientHandle = %x",
+                  __func__, rc, mJpegClientHandle);
+            mJpegClientHandle = 0;
+            memset(&mJpegHandle, 0, sizeof(mJpegHandle));
+        }
+        m_bInited = FALSE;
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : start
+ *
+ * DESCRIPTION: start postprocessor. Data process thread and data notify thread
+ *              will be launched.
+ *
+ * PARAMETERS :
+ *   @pSrcChannel : source channel obj ptr that possibly needs reprocess
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ * NOTE       : if any reprocess is needed, a reprocess channel/stream
+ *              will be started.
+ *==========================================================================*/
+int32_t QCameraPostProcessor::start(QCameraChannel *pSrcChannel)
+{
+    char prop[PROPERTY_VALUE_MAX];
+    int32_t rc = NO_ERROR;
+    QCameraChannel *pInputChannel = pSrcChannel;
+
+    if (m_bInited == FALSE) {
+        ALOGE("%s: postproc not initialized yet", __func__);
+        return UNKNOWN_ERROR;
+    }
+
+    if (m_DataMem != NULL) {
+        m_DataMem->release(m_DataMem);
+        m_DataMem = NULL;
+    }
+
+    if (pInputChannel == NULL) {
+        ALOGE("%s : Input Channel for pproc is NULL.", __func__);
+        return UNKNOWN_ERROR;
+    }
+
+    if ( m_parent->needReprocess() ) {
+        for (int8_t i = 0; i < mTotalNumReproc; i++) {
+            // Delete previous reproc channel
+            QCameraReprocessChannel *pChannel = mPPChannels[i];
+            if (pChannel != NULL) {
+                pChannel->stop();
+                delete pChannel;
+                pChannel = NULL;
+                m_parent->mParameters.setCurPPCount((int8_t)
+                        (m_parent->mParameters.getCurPPCount() - 1));
+            }
+        }
+
+        m_bufCountPPQ = 0;
+        m_parent->mParameters.setReprocCount();
+        mTotalNumReproc = m_parent->mParameters.getReprocCount();
+        m_parent->mParameters.setCurPPCount(0);
+
+        CDBG("%s : %d: mTotalNumReproc = %d", __func__, __LINE__, mTotalNumReproc);
+
+        // Create all reproc channels and start channel
+        for (int8_t i = 0; i < mTotalNumReproc; i++) {
+            m_parent->mParameters.setCurPPCount((int8_t) (i + 1));
+            mPPChannels[i] = m_parent->addReprocChannel(pInputChannel);
+            if (mPPChannels[i] == NULL) {
+                ALOGE("%s: cannot add multi reprocess channel i = %d", __func__, i);
+                return UNKNOWN_ERROR;
+            }
+
+            rc = mPPChannels[i]->start();
+            if (rc != 0) {
+                ALOGE("%s: cannot start multi reprocess channel i = %d", __func__, i);
+                delete mPPChannels[i];
+                mPPChannels[i] = NULL;
+                return UNKNOWN_ERROR;
+            }
+            pInputChannel = static_cast<QCameraChannel *>(mPPChannels[i]);
+        }
+    }
+
+    property_get("persist.camera.longshot.save", prop, "0");
+    mUseSaveProc = atoi(prop) > 0 ? true : false;
+
+    m_PPindex = 0;
+    m_InputMetadata.clear();
+    m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_START_DATA_PROC, TRUE, FALSE);
+    m_parent->m_cbNotifier.startSnapshots();
+
+    // Create Jpeg session
+    if ( !m_parent->mParameters.getRecordingHintValue() &&
+            !m_parent->isLongshotEnabled() && (mTotalNumReproc > 0)) {
+
+        QCameraChannel *pChannel = NULL;
+        pChannel = m_parent->needReprocess() ? mPPChannels[0] : pSrcChannel;
+        QCameraStream *pSnapshotStream = NULL;
+        QCameraStream *pThumbStream = NULL;
+        bool thumb_stream_needed = ((!m_parent->isZSLMode() ||
+            (m_parent->mParameters.getFlipMode(CAM_STREAM_TYPE_SNAPSHOT) ==
+             m_parent->mParameters.getFlipMode(CAM_STREAM_TYPE_PREVIEW))) &&
+            !m_parent->mParameters.generateThumbFromMain());
+
+        for (uint32_t i = 0; i < pChannel->getNumOfStreams(); ++i) {
+            QCameraStream *pStream = pChannel->getStreamByIndex(i);
+
+            if ( NULL == pStream ) {
+                break;
+            }
+
+            if (pStream->isTypeOf(CAM_STREAM_TYPE_SNAPSHOT) ||
+                    pStream->isOrignalTypeOf(CAM_STREAM_TYPE_SNAPSHOT)) {
+                pSnapshotStream = pStream;
+            }
+
+            if ((thumb_stream_needed) &&
+                   (pStream->isTypeOf(CAM_STREAM_TYPE_PREVIEW) ||
+                    pStream->isTypeOf(CAM_STREAM_TYPE_POSTVIEW) ||
+                    pStream->isOrignalTypeOf(CAM_STREAM_TYPE_PREVIEW) ||
+                    pStream->isOrignalTypeOf(CAM_STREAM_TYPE_POSTVIEW))) {
+                pThumbStream = pStream;
+            }
+        }
+
+        // If thumbnail is not part of the reprocess channel, then
+        // try to get it from the source channel
+        if ((thumb_stream_needed) && (NULL == pThumbStream) &&
+                (pChannel == mPPChannels[0])) {
+            for (uint32_t i = 0; i < pSrcChannel->getNumOfStreams(); ++i) {
+                QCameraStream *pStream = pSrcChannel->getStreamByIndex(i);
+
+                if ( NULL == pStream ) {
+                    break;
+                }
+
+                if (pStream->isTypeOf(CAM_STREAM_TYPE_POSTVIEW) ||
+                        pStream->isOrignalTypeOf(CAM_STREAM_TYPE_POSTVIEW) ||
+                        pStream->isTypeOf(CAM_STREAM_TYPE_PREVIEW) ||
+                        pStream->isOrignalTypeOf(CAM_STREAM_TYPE_PREVIEW)) {
+                    pThumbStream = pStream;
+                }
+            }
+        }
+
+        if ( NULL != pSnapshotStream ) {
+            mm_jpeg_encode_params_t encodeParam;
+            memset(&encodeParam, 0, sizeof(mm_jpeg_encode_params_t));
+            rc = getJpegEncodingConfig(encodeParam, pSnapshotStream, pThumbStream);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: error getting encoding config", __func__);
+                return rc;
+            }
+            CDBG_HIGH("[KPI Perf] %s : call jpeg create_session", __func__);
+
+            rc = mJpegHandle.create_session(mJpegClientHandle,
+                    &encodeParam,
+                    &mJpegSessionId);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: error creating a new jpeg encoding session", __func__);
+                return rc;
+            }
+            mNewJpegSessionNeeded = false;
+        }
+    }
+
+    return rc;
+}
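+
+// Note: start() tears down reprocess channels left from a previous capture,
+// chains getReprocCount() fresh ones off the source channel and, when
+// neither the recording hint nor longshot is active and at least one
+// reprocess pass is configured, pre-creates a jpeg session up front
+// (presumably to avoid per-snapshot session setup).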
+
+/*===========================================================================
+ * FUNCTION   : stop
+ *
+ * DESCRIPTION: stop postprocessor. Data process and notify thread will be stopped.
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ * NOTE       : reprocess channel will be stopped and deleted if there is any
+ *==========================================================================*/
+int32_t QCameraPostProcessor::stop()
+{
+    if (m_bInited == TRUE) {
+        m_parent->m_cbNotifier.stopSnapshots();
+
+        if (m_DataMem != NULL) {
+            m_DataMem->release(m_DataMem);
+            m_DataMem = NULL;
+        }
+
+        // dataProc thread needs to process "stop" as a sync call because aborting a jpeg job must be synchronous
+        m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_STOP_DATA_PROC, TRUE, TRUE);
+    }
+    // stop reproc channel if exists
+    for (int8_t i = 0; i < mTotalNumReproc; i++) {
+        QCameraReprocessChannel *pChannel = mPPChannels[i];
+        if (pChannel != NULL) {
+            pChannel->stop();
+            delete pChannel;
+            pChannel = NULL;
+            m_parent->mParameters.setCurPPCount((int8_t)
+                    (m_parent->mParameters.getCurPPCount() - 1));
+        }
+    }
+    mTotalNumReproc = 0;
+    m_parent->mParameters.setCurPPCount(0);
+    m_PPindex = 0;
+    m_InputMetadata.clear();
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : getJpegEncodingConfig
+ *
+ * DESCRIPTION: function to prepare encoding job information
+ *
+ * PARAMETERS :
+ *   @encode_parm   : param to be filled with encoding configuration
+ *   @main_stream   : main image stream to encode
+ *   @thumb_stream  : thumbnail stream, may be NULL
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraPostProcessor::getJpegEncodingConfig(mm_jpeg_encode_params_t& encode_parm,
+                                                    QCameraStream *main_stream,
+                                                    QCameraStream *thumb_stream)
+{
+    CDBG("%s : E", __func__);
+    int32_t ret = NO_ERROR;
+    size_t out_size;
+
+    char prop[PROPERTY_VALUE_MAX];
+    property_get("persist.camera.jpeg_burst", prop, "0");
+    mUseJpegBurst = (atoi(prop) > 0) && !mUseSaveProc;
+    encode_parm.burst_mode = mUseJpegBurst;
+
+    cam_rect_t crop;
+    memset(&crop, 0, sizeof(cam_rect_t));
+    main_stream->getCropInfo(crop);
+
+    cam_dimension_t src_dim, dst_dim;
+    memset(&src_dim, 0, sizeof(cam_dimension_t));
+    memset(&dst_dim, 0, sizeof(cam_dimension_t));
+    main_stream->getFrameDimension(src_dim);
+
+    bool hdr_output_crop = m_parent->mParameters.isHDROutputCropEnabled();
+    if (hdr_output_crop && crop.height) {
+        dst_dim.height = crop.height;
+    } else {
+        dst_dim.height = src_dim.height;
+    }
+    if (hdr_output_crop && crop.width) {
+        dst_dim.width = crop.width;
+    } else {
+        dst_dim.width = src_dim.width;
+    }
+
+    // set rotation only when no online rotation or offline pp rotation is done before
+    if (!m_parent->needRotationReprocess()) {
+        encode_parm.rotation = m_parent->mParameters.getJpegRotation();
+    }
+
+    encode_parm.main_dim.src_dim = src_dim;
+    encode_parm.main_dim.dst_dim = dst_dim;
+
+    m_dst_dim = dst_dim;
+
+    encode_parm.jpeg_cb = mJpegCB;
+    encode_parm.userdata = mJpegUserData;
+
+    m_bThumbnailNeeded = TRUE; // need to encode thumbnail by default
+    cam_dimension_t thumbnailSize;
+    memset(&thumbnailSize, 0, sizeof(cam_dimension_t));
+    m_parent->getThumbnailSize(thumbnailSize);
+    if (thumbnailSize.width == 0 || thumbnailSize.height == 0) {
+        // (0,0) means no thumbnail
+        m_bThumbnailNeeded = FALSE;
+    }
+    encode_parm.encode_thumbnail = m_bThumbnailNeeded;
+
+    // get color format
+    cam_format_t img_fmt = CAM_FORMAT_YUV_420_NV12;
+    main_stream->getFormat(img_fmt);
+    encode_parm.color_format = getColorfmtFromImgFmt(img_fmt);
+
+    // get jpeg quality
+    uint32_t val = m_parent->getJpegQuality();
+    if (0U < val) {
+        encode_parm.quality = val;
+    } else {
+        ALOGI("%s: Using default JPEG quality", __func__);
+        encode_parm.quality = 85;
+    }
+    cam_frame_len_offset_t main_offset;
+    memset(&main_offset, 0, sizeof(cam_frame_len_offset_t));
+    main_stream->getFrameOffset(main_offset);
+
+    // src buf config
+    QCameraMemory *pStreamMem = main_stream->getStreamBufs();
+    if (pStreamMem == NULL) {
+        ALOGE("%s: cannot get stream bufs from main stream", __func__);
+        ret = BAD_VALUE;
+        goto on_error;
+    }
+    encode_parm.num_src_bufs = pStreamMem->getCnt();
+    for (uint32_t i = 0; i < encode_parm.num_src_bufs; i++) {
+        camera_memory_t *stream_mem = pStreamMem->getMemory(i, false);
+        if (stream_mem != NULL) {
+            encode_parm.src_main_buf[i].index = i;
+            encode_parm.src_main_buf[i].buf_size = stream_mem->size;
+            encode_parm.src_main_buf[i].buf_vaddr = (uint8_t *)stream_mem->data;
+            encode_parm.src_main_buf[i].fd = pStreamMem->getFd(i);
+            encode_parm.src_main_buf[i].format = MM_JPEG_FMT_YUV;
+            encode_parm.src_main_buf[i].offset = main_offset;
+        }
+    }
+
+    if (m_bThumbnailNeeded == TRUE) {
+        bool need_thumb_rotate = true;
+        uint32_t jpeg_rotation = m_parent->mParameters.getJpegRotation();
+        m_parent->getThumbnailSize(encode_parm.thumb_dim.dst_dim);
+
+        if (thumb_stream == NULL) {
+            thumb_stream = main_stream;
+            need_thumb_rotate = false;
+        }
+        pStreamMem = thumb_stream->getStreamBufs();
+        if (pStreamMem == NULL) {
+            ALOGE("%s: cannot get stream bufs from thumb stream", __func__);
+            ret = BAD_VALUE;
+            goto on_error;
+        }
+        cam_frame_len_offset_t thumb_offset;
+        memset(&thumb_offset, 0, sizeof(cam_frame_len_offset_t));
+        thumb_stream->getFrameOffset(thumb_offset);
+        encode_parm.num_tmb_bufs =  pStreamMem->getCnt();
+        for (uint32_t i = 0; i < pStreamMem->getCnt(); i++) {
+            camera_memory_t *stream_mem = pStreamMem->getMemory(i, false);
+            if (stream_mem != NULL) {
+                encode_parm.src_thumb_buf[i].index = i;
+                encode_parm.src_thumb_buf[i].buf_size = stream_mem->size;
+                encode_parm.src_thumb_buf[i].buf_vaddr = (uint8_t *)stream_mem->data;
+                encode_parm.src_thumb_buf[i].fd = pStreamMem->getFd(i);
+                encode_parm.src_thumb_buf[i].format = MM_JPEG_FMT_YUV;
+                encode_parm.src_thumb_buf[i].offset = thumb_offset;
+            }
+        }
+        cam_format_t img_fmt_thumb = CAM_FORMAT_YUV_420_NV12;
+        thumb_stream->getFormat(img_fmt_thumb);
+        encode_parm.thumb_color_format = getColorfmtFromImgFmt(img_fmt_thumb);
+
+        // crop is the same if frame is the same
+        if (thumb_stream != main_stream) {
+            memset(&crop, 0, sizeof(cam_rect_t));
+            thumb_stream->getCropInfo(crop);
+        }
+
+        memset(&src_dim, 0, sizeof(cam_dimension_t));
+        thumb_stream->getFrameDimension(src_dim);
+        encode_parm.thumb_dim.src_dim = src_dim;
+
+        if (!m_parent->needRotationReprocess() || need_thumb_rotate) {
+            encode_parm.thumb_rotation = jpeg_rotation;
+        } else if ((90 == jpeg_rotation) || (270 == jpeg_rotation)) {
+            // swap thumbnail dimensions
+            cam_dimension_t tmp_dim = encode_parm.thumb_dim.dst_dim;
+            encode_parm.thumb_dim.dst_dim.width = tmp_dim.height;
+            encode_parm.thumb_dim.dst_dim.height = tmp_dim.width;
+        }
+        encode_parm.thumb_dim.crop = crop;
+    }
+
+    encode_parm.num_dst_bufs = 1;
+    if (mUseJpegBurst) {
+        encode_parm.num_dst_bufs = MAX_JPEG_BURST;
+    }
+    encode_parm.get_memory = NULL;
+    out_size = main_offset.frame_len;
+    if (mJpegMemOpt) {
+        encode_parm.get_memory = getJpegMemory;
+        out_size = sizeof(omx_jpeg_ouput_buf_t);
+        encode_parm.num_dst_bufs = encode_parm.num_src_bufs;
+    }
+    m_JpegOutputMemCount = (uint32_t)encode_parm.num_dst_bufs;
+    for (uint32_t i = 0; i < m_JpegOutputMemCount; i++) {
+        if (m_pJpegOutputMem[i] != NULL)
+          free(m_pJpegOutputMem[i]);
+        omx_jpeg_ouput_buf_t omx_out_buf;
+        omx_out_buf.handle = this;
+        // allocate output buf for jpeg encoding
+        m_pJpegOutputMem[i] = malloc(out_size);
+
+        if (NULL == m_pJpegOutputMem[i]) {
+          ret = NO_MEMORY;
+          ALOGE("%s : initHeapMem for jpeg, ret = NO_MEMORY", __func__);
+          goto on_error;
+        }
+
+        if (mJpegMemOpt) {
+            memcpy(m_pJpegOutputMem[i], &omx_out_buf, sizeof(omx_out_buf));
+        }
+
+
+        encode_parm.dest_buf[i].index = i;
+        encode_parm.dest_buf[i].buf_size = main_offset.frame_len;
+        encode_parm.dest_buf[i].buf_vaddr = (uint8_t *)m_pJpegOutputMem[i];
+        encode_parm.dest_buf[i].fd = -1;
+        encode_parm.dest_buf[i].format = MM_JPEG_FMT_YUV;
+        encode_parm.dest_buf[i].offset = main_offset;
+    }
+
+
+    CDBG("%s : X", __func__);
+    return NO_ERROR;
+
+on_error:
+    FREE_JPEG_OUTPUT_BUFFER(m_pJpegOutputMem, m_JpegOutputMemCount);
+
+    CDBG("%s : X with error %d", __func__, ret);
+    return ret;
+}
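+
+// Note: when mJpegMemOpt is set, the destination buffers prepared above hold
+// only omx_jpeg_ouput_buf_t handles and the actual jpeg memory is obtained
+// on demand through the getJpegMemory callback; otherwise each destination
+// buffer is a full frame_len sized allocation.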
+
+/*===========================================================================
+ * FUNCTION   : sendEvtNotify
+ *
+ * DESCRIPTION: send event notify through notify callback registered by upper layer
+ *
+ * PARAMETERS :
+ *   @msg_type: msg type of notify
+ *   @ext1    : extension
+ *   @ext2    : extension
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraPostProcessor::sendEvtNotify(int32_t msg_type,
+                                            int32_t ext1,
+                                            int32_t ext2)
+{
+    return m_parent->sendEvtNotify(msg_type, ext1, ext2);
+}
+
+/*===========================================================================
+ * FUNCTION   : sendDataNotify
+ *
+ * DESCRIPTION: enqueue data into dataNotify thread
+ *
+ * PARAMETERS :
+ *   @msg_type: data callback msg type
+ *   @data    : ptr to data memory struct
+ *   @index   : index to data buffer
+ *   @metadata: ptr to meta data buffer if there is any
+ *   @release_data : ptr to struct indicating if data need to be released
+ *                   after notify
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraPostProcessor::sendDataNotify(int32_t msg_type,
+                                             camera_memory_t *data,
+                                             uint8_t index,
+                                             camera_frame_metadata_t *metadata,
+                                             qcamera_release_data_t *release_data)
+{
+    qcamera_data_argm_t *data_cb = (qcamera_data_argm_t *)malloc(sizeof(qcamera_data_argm_t));
+    if (NULL == data_cb) {
+        ALOGE("%s: no mem for acamera_data_argm_t", __func__);
+        return NO_MEMORY;
+    }
+    memset(data_cb, 0, sizeof(qcamera_data_argm_t));
+    data_cb->msg_type = msg_type;
+    data_cb->data = data;
+    data_cb->index = index;
+    data_cb->metadata = metadata;
+    if (release_data != NULL) {
+        data_cb->release_data = *release_data;
+    }
+
+    qcamera_callback_argm_t cbArg;
+    memset(&cbArg, 0, sizeof(qcamera_callback_argm_t));
+    cbArg.cb_type = QCAMERA_DATA_SNAPSHOT_CALLBACK;
+    cbArg.msg_type = msg_type;
+    cbArg.data = data;
+    cbArg.metadata = metadata;
+    cbArg.user_data = data_cb;
+    cbArg.cookie = this;
+    cbArg.release_cb = releaseNotifyData;
+    int rc = m_parent->m_cbNotifier.notifyCallback(cbArg);
+    if ( NO_ERROR != rc ) {
+        ALOGE("%s: Error enqueuing jpeg data into notify queue", __func__);
+        releaseNotifyData(data_cb, this, UNKNOWN_ERROR);
+        return UNKNOWN_ERROR;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : validatePostProcess
+ *
+ * DESCRIPTION: Verify output buffer count of pp module
+ *
+ * PARAMETERS :
+ *   @frame   : process frame received from mm-camera-interface
+ *
+ * RETURN     : bool type of status
+ *              TRUE  -- success
+ *              FALSE -- failure
+ *==========================================================================*/
+bool QCameraPostProcessor::validatePostProcess(mm_camera_super_buf_t *frame)
+{
+    bool status = TRUE;
+    QCameraChannel *pChannel = NULL;
+    QCameraReprocessChannel *m_pReprocChannel = NULL;
+
+    if (frame == NULL) {
+        return status;
+    }
+
+    pChannel = m_parent->getChannelByHandle(frame->ch_id);
+    for (int8_t i = 0; i < mTotalNumReproc; i++) {
+        if (pChannel == mPPChannels[i]->getSrcChannel()) {
+            m_pReprocChannel = mPPChannels[i];
+            break;
+        }
+    }
+
+    if (m_pReprocChannel != NULL && pChannel == m_pReprocChannel->getSrcChannel()) {
+        QCameraStream *pStream = NULL;
+        for (uint8_t i = 0; i < m_pReprocChannel->getNumOfStreams(); i++) {
+            pStream = m_pReprocChannel->getStreamByIndex(i);
+            if (pStream && (m_inputPPQ.getCurrentSize() > 0) &&
+                    m_ongoingPPQ.getCurrentSize() >=  pStream->getNumQueuedBuf()) {
+                CDBG_HIGH("Out of PP Buffer PPQ = %d ongoingQ = %d Jpeg = %d onJpeg = %d",
+                        m_inputPPQ.getCurrentSize(), m_ongoingPPQ.getCurrentSize(),
+                        m_inputJpegQ.getCurrentSize(), m_ongoingJpegQ.getCurrentSize());
+                status = FALSE;
+                break;
+            }
+        }
+    }
+    return status;
+}
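+
+// Note: validatePostProcess() returns FALSE when input frames are pending
+// but the ongoing reprocess queue already holds at least as many buffers as
+// the reprocess stream has queued, i.e. no reprocess output buffer is
+// currently free at the CPP; processData() consults it after queueing a new
+// reprocess request.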
+
+/*===========================================================================
+ * FUNCTION   : processData
+ *
+ * DESCRIPTION: enqueue data into dataProc thread
+ *
+ * PARAMETERS :
+ *   @frame   : process frame received from mm-camera-interface
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ * NOTE       : depending on whether offline reprocess is needed, the received
+ *              frame will be sent to either the postprocess input queue or
+ *              jpeg encoding
+ *==========================================================================*/
+int32_t QCameraPostProcessor::processData(mm_camera_super_buf_t *frame)
+{
+    bool triggerEvent = TRUE;
+    QCameraChannel *m_pReprocChannel = NULL;
+
+    if (m_bInited == FALSE) {
+        ALOGE("%s: postproc not initialized yet", __func__);
+        return UNKNOWN_ERROR;
+    }
+
+    mm_camera_buf_def_t *meta_frame = NULL;
+    for (uint32_t i = 0; i < frame->num_bufs; i++) {
+        // look through input superbuf
+        if (frame->bufs[i]->stream_type == CAM_STREAM_TYPE_METADATA) {
+            meta_frame = frame->bufs[i];
+            break;
+        }
+    }
+    if (meta_frame != NULL) {
+        //Update metadata with frame-based parameters
+        m_parent->updateMetadata((metadata_buffer_t *)meta_frame->buffer);
+    }
+
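+    // Dispatch the frame along one of three paths: offline reprocess (post proc
+    // input queue), NV16/NV21 raw picture (raw data path), or direct jpeg
+    // encoding (jpeg input queue).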
+    if (m_parent->needReprocess()) {
+        if ((!m_parent->isLongshotEnabled() &&
+             !m_parent->m_stateMachine.isNonZSLCaptureRunning()) ||
+            (m_parent->isLongshotEnabled() &&
+             m_parent->isCaptureShutterEnabled())) {
+            //play shutter sound
+            m_parent->playShutter();
+        }
+
+        ATRACE_INT("Camera:Reprocess", 1);
+        CDBG_HIGH("%s: need reprocess", __func__);
+
+        // enqueue to post proc input queue
+        qcamera_pp_request_t *pp_request_job =
+                (qcamera_pp_request_t *)malloc(sizeof(qcamera_pp_request_t));
+        if (pp_request_job == NULL) {
+            ALOGE("%s: No memory for pproc job", __func__);
+            return NO_MEMORY;
+        }
+        memset(pp_request_job, 0, sizeof(qcamera_pp_request_t));
+        pp_request_job->src_frame = frame;
+        pp_request_job->src_reproc_frame = frame;
+        pp_request_job->reprocCount = 0;
+        if (m_inputPPQ.enqueue((void *)pp_request_job)) {
+            //avoid sending frame for reprocessing if o/p buffer is not queued to CPP.
+            triggerEvent = validatePostProcess(frame);
+        } else {
+            CDBG_HIGH("%s : Input PP Q is not active!!!", __func__);
+            releaseSuperBuf(frame);
+            free(frame);
+            free(pp_request_job);
+            frame = NULL;
+            pp_request_job = NULL;
+            return NO_ERROR;
+        }
+        if (m_parent->mParameters.isAdvCamFeaturesEnabled()
+                && (meta_frame != NULL)) {
+            m_InputMetadata.add(meta_frame);
+        }
+    } else if (m_parent->mParameters.isNV16PictureFormat() ||
+        m_parent->mParameters.isNV21PictureFormat()) {
+        //check if raw frame information is needed.
+        if(m_parent->mParameters.isYUVFrameInfoNeeded())
+            setYUVFrameInfo(frame);
+
+        processRawData(frame);
+    } else {
+        //play shutter sound
+        if(!m_parent->m_stateMachine.isNonZSLCaptureRunning() &&
+           !m_parent->mLongshotEnabled)
+           m_parent->playShutter();
+
+        CDBG_HIGH("%s: no need offline reprocess, sending to jpeg encoding", __func__);
+        qcamera_jpeg_data_t *jpeg_job =
+            (qcamera_jpeg_data_t *)malloc(sizeof(qcamera_jpeg_data_t));
+        if (jpeg_job == NULL) {
+            ALOGE("%s: No memory for jpeg job", __func__);
+            return NO_MEMORY;
+        }
+
+        memset(jpeg_job, 0, sizeof(qcamera_jpeg_data_t));
+        jpeg_job->src_frame = frame;
+
+        if (meta_frame != NULL) {
+            // fill in meta data frame ptr
+            jpeg_job->metadata = (metadata_buffer_t *)meta_frame->buffer;
+        }
+
+        // enqueue to jpeg input queue
+        if (!m_inputJpegQ.enqueue((void *)jpeg_job)) {
+            CDBG_HIGH("%s : Input Jpeg Q is not active!!!", __func__);
+            releaseJpegJobData(jpeg_job);
+            free(jpeg_job);
+            jpeg_job = NULL;
+            return NO_ERROR;
+        }
+    }
+
+    if (triggerEvent){
+        m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE);
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : processRawData
+ *
+ * DESCRIPTION: enqueue raw data into dataProc thread
+ *
+ * PARAMETERS :
+ *   @frame   : process frame received from mm-camera-interface
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraPostProcessor::processRawData(mm_camera_super_buf_t *frame)
+{
+    if (m_bInited == FALSE) {
+        ALOGE("%s: postproc not initialized yet", __func__);
+        return UNKNOWN_ERROR;
+    }
+
+    // enqueue to raw input queue
+    if (m_inputRawQ.enqueue((void *)frame)) {
+        m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE);
+    } else {
+        CDBG_HIGH("%s : m_inputRawQ is not active!!!", __func__);
+        releaseSuperBuf(frame);
+        free(frame);
+        frame = NULL;
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : processJpegEvt
+ *
+ * DESCRIPTION: process jpeg event from mm-jpeg-interface.
+ *
+ * PARAMETERS :
+ *   @evt     : payload of jpeg event, including information about jpeg encoding
+ *              status, jpeg size and so on.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ * NOTE       : This event will also trigger DataProc thread to move to next job
+ *              processing (i.e., send a new jpeg encoding job to mm-jpeg-interface
+ *              if there is any pending job in jpeg input queue)
+ *==========================================================================*/
+int32_t QCameraPostProcessor::processJpegEvt(qcamera_jpeg_evt_payload_t *evt)
+{
+    if (m_bInited == FALSE) {
+        ALOGE("%s: postproc not initialized yet", __func__);
+        return UNKNOWN_ERROR;
+    }
+
+    int32_t rc = NO_ERROR;
+    camera_memory_t *jpeg_mem = NULL;
+    omx_jpeg_ouput_buf_t *jpeg_out = NULL;
+
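+    // In longshot mode with the save thread in use, hand the jpeg payload off
+    // to the save queue/thread instead of issuing the compressed-image callback
+    // inline below.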
+    if (mUseSaveProc && m_parent->isLongshotEnabled()) {
+        qcamera_jpeg_evt_payload_t *saveData = ( qcamera_jpeg_evt_payload_t * ) malloc(sizeof(qcamera_jpeg_evt_payload_t));
+        if ( NULL == saveData ) {
+            ALOGE("%s: Can not allocate save data message!", __func__);
+            return NO_MEMORY;
+        }
+        *saveData = *evt;
+        if (m_inputSaveQ.enqueue((void *) saveData)) {
+            m_saveProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE);
+        } else {
+            CDBG("%s : m_inputSaveQ PP Q is not active!!!", __func__);
+            free(saveData);
+            saveData = NULL;
+            return rc;
+        }
+    } else {
+        // Release jpeg job data
+        m_ongoingJpegQ.flushNodes(matchJobId, (void*)&evt->jobId);
+
+        if (m_inputPPQ.getCurrentSize() > 0) {
+            m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE);
+        }
+        CDBG_HIGH("[KPI Perf] %s : jpeg job %d", __func__, evt->jobId);
+
+        if ((false == m_parent->m_bIntJpegEvtPending) &&
+             (m_parent->mDataCb == NULL ||
+              m_parent->msgTypeEnabledWithLock(CAMERA_MSG_COMPRESSED_IMAGE) == 0 )) {
+            CDBG_HIGH("%s: No dataCB or CAMERA_MSG_COMPRESSED_IMAGE not enabled",
+                  __func__);
+            rc = NO_ERROR;
+            goto end;
+        }
+
+        if(evt->status == JPEG_JOB_STATUS_ERROR) {
+            ALOGE("%s: Error event handled from jpeg, status = %d",
+                  __func__, evt->status);
+            rc = FAILED_TRANSACTION;
+            goto end;
+        }
+
+        m_parent->dumpJpegToFile(evt->out_data.buf_vaddr,
+                                  evt->out_data.buf_filled_len,
+                                  evt->jobId);
+        CDBG_HIGH("%s: Dump jpeg_size=%d", __func__, evt->out_data.buf_filled_len);
+
+        if(true == m_parent->m_bIntJpegEvtPending) {
+            //Sending JPEG snapshot taken notification to HAL
+            pthread_mutex_lock(&m_parent->m_int_lock);
+            pthread_cond_signal(&m_parent->m_int_cond);
+            pthread_mutex_unlock(&m_parent->m_int_lock);
+            m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE);
+            return rc;
+        }
+
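+        // With mJpegMemOpt the encoder output already wraps a camera_memory_t
+        // (mem_hdl), which is reused as-is; otherwise the encoded bytes are
+        // copied into freshly allocated callback memory.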
+        if (!mJpegMemOpt) {
+            // alloc jpeg memory to pass to upper layer
+            jpeg_mem = m_parent->mGetMemory(-1, evt->out_data.buf_filled_len,
+                1, m_parent->mCallbackCookie);
+            if (NULL == jpeg_mem) {
+                rc = NO_MEMORY;
+                ALOGE("%s : getMemory for jpeg, ret = NO_MEMORY", __func__);
+                goto end;
+            }
+            memcpy(jpeg_mem->data, evt->out_data.buf_vaddr, evt->out_data.buf_filled_len);
+        } else {
+            jpeg_out  = (omx_jpeg_ouput_buf_t*) evt->out_data.buf_vaddr;
+            jpeg_mem = (camera_memory_t *)jpeg_out->mem_hdl;
+        }
+
+        CDBG_HIGH("%s : Calling upperlayer callback to store JPEG image", __func__);
+        qcamera_release_data_t release_data;
+        memset(&release_data, 0, sizeof(qcamera_release_data_t));
+        release_data.data = jpeg_mem;
+        CDBG_HIGH("[KPI Perf] %s: PROFILE_JPEG_CB ",__func__);
+        rc = sendDataNotify(CAMERA_MSG_COMPRESSED_IMAGE,
+                            jpeg_mem,
+                            0,
+                            NULL,
+                            &release_data);
+        m_parent->setOutputImageCount(m_parent->getOutputImageCount() + 1);
+
+end:
+        if (rc != NO_ERROR) {
+            // send error msg to upper layer
+            sendEvtNotify(CAMERA_MSG_ERROR,
+                          UNKNOWN_ERROR,
+                          0);
+
+            if (NULL != jpeg_mem) {
+                jpeg_mem->release(jpeg_mem);
+                jpeg_mem = NULL;
+            }
+        }
+
+        /* check whether to send callback for depth map */
+        if (m_parent->mParameters.isUbiRefocus() &&
+                (m_parent->getOutputImageCount() + 1 ==
+                        m_parent->mParameters.getRefocusOutputCount())) {
+            m_parent->setOutputImageCount(m_parent->getOutputImageCount() + 1);
+
+            jpeg_mem = m_DataMem;
+            release_data.data = jpeg_mem;
+            m_DataMem = NULL;
+            CDBG_HIGH("[KPI Perf] %s: send jpeg callback for depthmap ",__func__);
+            rc = sendDataNotify(CAMERA_MSG_COMPRESSED_IMAGE,
+                jpeg_mem,
+                0,
+                NULL,
+                &release_data);
+            if (rc != NO_ERROR) {
+                // send error msg to upper layer
+                sendEvtNotify(CAMERA_MSG_ERROR,
+                        UNKNOWN_ERROR,
+                        0);
+                if (NULL != jpeg_mem) {
+                    jpeg_mem->release(jpeg_mem);
+                    jpeg_mem = NULL;
+                }
+            }
+        }
+    }
+
+    // wake up data proc thread to do next job,
+    // if previous request is blocked due to ongoing jpeg job
+    m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : processPPData
+ *
+ * DESCRIPTION: process received frame after reprocess.
+ *
+ * PARAMETERS :
+ *   @frame   : received frame from reprocess channel.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ * NOTE       : The frame after reprocess needs to be sent to jpeg encoding.
+ *==========================================================================*/
+int32_t QCameraPostProcessor::processPPData(mm_camera_super_buf_t *frame)
+{
+    bool triggerEvent = TRUE;
+
+    bool needSuperBufMatch = m_parent->mParameters.generateThumbFromMain();
+    if (m_bInited == FALSE) {
+        ALOGE("%s: postproc not initialized yet", __func__);
+        return UNKNOWN_ERROR;
+    }
+
+    qcamera_pp_data_t *job = (qcamera_pp_data_t *)m_ongoingPPQ.dequeue();
+    if (NULL == job) {
+        ALOGE("%s: Cannot find reprocess job", __func__);
+        return BAD_VALUE;
+    }
+
+    if (!needSuperBufMatch && (job->src_frame == NULL
+            || job->src_reproc_frame == NULL) ) {
+        ALOGE("%s: Invalid reprocess job", __func__);
+        return BAD_VALUE;
+    }
+
+    if (!needSuperBufMatch && (m_parent->mParameters.isNV16PictureFormat() ||
+        m_parent->mParameters.isNV21PictureFormat())) {
+        releaseOngoingPPData(job, this);
+        free(job);
+
+        if(m_parent->mParameters.isYUVFrameInfoNeeded())
+            setYUVFrameInfo(frame);
+        return processRawData(frame);
+    }
+    if (m_parent->isLongshotEnabled() &&
+            !m_parent->isCaptureShutterEnabled()) {
+        // play shutter sound for longshot
+        // after reprocess is done
+        // TODO: Move this after CAC done event
+        m_parent->playShutter();
+    }
+
+    int8_t mCurReprocCount = job->reprocCount;
+    if ( mCurReprocCount > 1 ) {
+        //In case of pp 2nd pass, we can release input of 2nd pass
+        releaseSuperBuf(job->src_frame);
+        free(job->src_frame);
+        job->src_frame = NULL;
+    }
+
+    CDBG("%s: mCurReprocCount = %d mTotalNumReproc = %d",
+            __func__, mCurReprocCount, mTotalNumReproc);
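+    // Multi-pass reprocess: if more passes remain, push the frame back onto the
+    // PP input queue; otherwise hand the reprocessed frame off to jpeg encoding.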
+    if (mCurReprocCount < mTotalNumReproc) {
+        //More pp pass needed. Push frame back to pp queue.
+        qcamera_pp_request_t *pp_request_job =
+                (qcamera_pp_request_t *)malloc(sizeof(qcamera_pp_request_t));
+        if (pp_request_job == NULL) {
+            ALOGE("%s: No memory for pproc job", __func__);
+            return NO_MEMORY;
+        }
+        memset(pp_request_job, 0, sizeof(qcamera_pp_request_t));
+        pp_request_job->src_frame = frame;
+        pp_request_job->src_reproc_frame = job->src_reproc_frame;
+        pp_request_job->reprocCount = mCurReprocCount;
+        // enqueue to post proc input queue
+        if (m_inputPPQ.enqueue((void *)pp_request_job)) {
+            triggerEvent = validatePostProcess(frame);
+        } else {
+            CDBG_HIGH("%s : m_input PP Q is not active!!!", __func__);
+            releasePPInputData(pp_request_job,this);
+            free(pp_request_job);
+            pp_request_job = NULL;
+            triggerEvent = FALSE;
+        }
+    } else {
+        //Done with post processing. Send frame to Jpeg
+        qcamera_jpeg_data_t *jpeg_job =
+                (qcamera_jpeg_data_t *)malloc(sizeof(qcamera_jpeg_data_t));
+        if (jpeg_job == NULL) {
+            ALOGE("%s: No memory for jpeg job", __func__);
+            return NO_MEMORY;
+        }
+
+        memset(jpeg_job, 0, sizeof(qcamera_jpeg_data_t));
+        jpeg_job->src_frame = frame;
+        jpeg_job->src_reproc_frame = job ? job->src_reproc_frame : NULL;
+        jpeg_job->src_reproc_bufs = job ? job->src_reproc_bufs : NULL;
+        jpeg_job->reproc_frame_release = job ? job->reproc_frame_release : false;
+
+        // find meta data frame
+        mm_camera_buf_def_t *meta_frame = NULL;
+        if (m_parent->mParameters.isAdvCamFeaturesEnabled()) {
+            size_t meta_idx = m_parent->mParameters.getExifBufIndex(m_PPindex);
+            if (m_InputMetadata.size() >= (meta_idx + 1)) {
+                meta_frame = m_InputMetadata.itemAt(meta_idx);
+            } else {
+                ALOGE("%s: Input metadata vector contains %d entries, index required %d",
+                        __func__, m_InputMetadata.size(), meta_idx);
+            }
+            m_PPindex++;
+        } else {
+            for (uint32_t i = 0; job && job->src_reproc_frame &&
+                    (i < job->src_reproc_frame->num_bufs); i++) {
+                // look through input superbuf
+                if (job->src_reproc_frame->bufs[i]->stream_type == CAM_STREAM_TYPE_METADATA) {
+                    meta_frame = job->src_reproc_frame->bufs[i];
+                    break;
+                }
+            }
+
+            if (meta_frame == NULL) {
+                // look through reprocess superbuf
+                for (uint32_t i = 0; i < frame->num_bufs; i++) {
+                    if (frame->bufs[i]->stream_type == CAM_STREAM_TYPE_METADATA) {
+                        meta_frame = frame->bufs[i];
+                        break;
+                    }
+                }
+            }
+        }
+        if (meta_frame != NULL) {
+            // fill in meta data frame ptr
+            jpeg_job->metadata = (metadata_buffer_t *)meta_frame->buffer;
+        }
+
+        // enqueue reprocessed frame to jpeg input queue
+        if (m_inputJpegQ.enqueue((void *)jpeg_job)) {
+            if (m_parent->isLongshotEnabled()) {
+                triggerEvent = validatePostProcess(frame);
+            }
+        } else {
+            CDBG_HIGH("%s : Input Jpeg Q is not active!!!", __func__);
+            releaseJpegJobData(jpeg_job);
+            free(jpeg_job);
+            jpeg_job = NULL;
+            triggerEvent = FALSE;
+        }
+    }
+
+    // free pp job buf
+    if (job) {
+        free(job);
+    }
+
+    ALOGD("%s: %d] ", __func__, __LINE__);
+    // wake up data proc thread
+
+    if (triggerEvent) {
+        m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE);
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : findJpegJobByJobId
+ *
+ * DESCRIPTION: find a jpeg job from ongoing Jpeg queue by its job ID
+ *
+ * PARAMETERS :
+ *   @jobId   : job Id of the job
+ *
+ * RETURN     : ptr to a jpeg job struct. NULL if not found.
+ *
+ * NOTE       : Currently only one job is sent to mm-jpeg-interface at a time
+ *              for jpeg encoding. Therefore simply dequeuing from the ongoing
+ *              Jpeg Queue will serve the purpose of finding the jpeg job.
+ *==========================================================================*/
+qcamera_jpeg_data_t *QCameraPostProcessor::findJpegJobByJobId(uint32_t jobId)
+{
+    qcamera_jpeg_data_t * job = NULL;
+    if (jobId == 0) {
+        ALOGE("%s: not a valid jpeg jobId", __func__);
+        return NULL;
+    }
+
+    // currently only one jpeg job ongoing, so simply dequeue the head
+    job = (qcamera_jpeg_data_t *)m_ongoingJpegQ.dequeue();
+    return job;
+}
+
+/*===========================================================================
+ * FUNCTION   : releasePPInputData
+ *
+ * DESCRIPTION: callback function to release post process input data node
+ *
+ * PARAMETERS :
+ *   @data      : ptr to post process input data
+ *   @user_data : user data ptr (QCameraReprocessor)
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCameraPostProcessor::releasePPInputData(void *data, void *user_data)
+{
+    QCameraPostProcessor *pme = (QCameraPostProcessor *)user_data;
+    if (NULL != pme) {
+        qcamera_pp_request_t *pp_job = (qcamera_pp_request_t *)data;
+        if (NULL != pp_job->src_frame) {
+            pme->releaseSuperBuf(pp_job->src_frame);
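+            // src_frame may alias src_reproc_frame; clear the alias so the
+            // same superbuf is not released and freed twice below.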
+            if (pp_job->src_frame == pp_job->src_reproc_frame)
+                pp_job->src_reproc_frame = NULL;
+            free(pp_job->src_frame);
+            pp_job->src_frame = NULL;
+        }
+        if (NULL != pp_job->src_reproc_frame) {
+            pme->releaseSuperBuf(pp_job->src_reproc_frame);
+            free(pp_job->src_reproc_frame);
+            pp_job->src_reproc_frame = NULL;
+        }
+        pp_job->reprocCount = 0;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseJpegData
+ *
+ * DESCRIPTION: callback function to release jpeg job node
+ *
+ * PARAMETERS :
+ *   @data      : ptr to ongoing jpeg job data
+ *   @user_data : user data ptr (QCameraReprocessor)
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCameraPostProcessor::releaseJpegData(void *data, void *user_data)
+{
+    QCameraPostProcessor *pme = (QCameraPostProcessor *)user_data;
+    if (NULL != pme) {
+        pme->releaseJpegJobData((qcamera_jpeg_data_t *)data);
+        CDBG_HIGH("%s : Rleased job ID %u", __func__,
+            ((qcamera_jpeg_data_t *)data)->jobId);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseOngoingPPData
+ *
+ * DESCRIPTION: callback function to release ongoing postprocess job node
+ *
+ * PARAMETERS :
+ *   @data      : ptr to ongoing postprocess job
+ *   @user_data : user data ptr (QCameraReprocessor)
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCameraPostProcessor::releaseOngoingPPData(void *data, void *user_data)
+{
+    QCameraPostProcessor *pme = (QCameraPostProcessor *)user_data;
+    if (NULL != pme) {
+        qcamera_pp_data_t *pp_job = (qcamera_pp_data_t *)data;
+        if (NULL != pp_job->src_frame) {
+            if (!pp_job->reproc_frame_release) {
+                pme->releaseSuperBuf(pp_job->src_frame);
+            }
+            if (pp_job->src_frame == pp_job->src_reproc_frame)
+                pp_job->src_reproc_frame = NULL;
+
+            free(pp_job->src_frame);
+            pp_job->src_frame = NULL;
+        }
+        if (NULL != pp_job->src_reproc_frame) {
+            pme->releaseSuperBuf(pp_job->src_reproc_frame);
+            free(pp_job->src_reproc_frame);
+            pp_job->src_reproc_frame = NULL;
+        }
+        pp_job->reprocCount = 0;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseNotifyData
+ *
+ * DESCRIPTION: function to release internal resources in notify data struct
+ *
+ * PARAMETERS :
+ *   @user_data  : ptr user data
+ *   @cookie     : callback cookie
+ *   @cb_status  : callback status
+ *
+ * RETURN     : None
+ *
+ * NOTE       : deallocate jpeg heap memory if it's not NULL
+ *==========================================================================*/
+void QCameraPostProcessor::releaseNotifyData(void *user_data,
+                                             void *cookie,
+                                             int32_t cb_status)
+{
+    qcamera_data_argm_t *app_cb = ( qcamera_data_argm_t * ) user_data;
+    QCameraPostProcessor *postProc = ( QCameraPostProcessor * ) cookie;
+    if ( ( NULL != app_cb ) && ( NULL != postProc ) ) {
+
+        if ( postProc->mUseSaveProc &&
+             app_cb->release_data.unlinkFile &&
+             ( NO_ERROR != cb_status ) ) {
+
+            String8 unlinkPath((const char *) app_cb->release_data.data->data,
+                                app_cb->release_data.data->size);
+            int rc = unlink(unlinkPath.string());
+            CDBG_HIGH("%s : Unlinking stored file rc = %d",
+                  __func__,
+                  rc);
+        }
+
+        if (app_cb && NULL != app_cb->release_data.data) {
+            app_cb->release_data.data->release(app_cb->release_data.data);
+            app_cb->release_data.data = NULL;
+        }
+        if (app_cb && NULL != app_cb->release_data.frame) {
+            postProc->releaseSuperBuf(app_cb->release_data.frame);
+            free(app_cb->release_data.frame);
+            app_cb->release_data.frame = NULL;
+        }
+        if (app_cb && NULL != app_cb->release_data.streamBufs) {
+            app_cb->release_data.streamBufs->deallocate();
+            delete app_cb->release_data.streamBufs;
+            app_cb->release_data.streamBufs = NULL;
+        }
+        free(app_cb);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseSuperBuf
+ *
+ * DESCRIPTION: function to release a superbuf frame by returning it to the kernel
+ *
+ * PARAMETERS :
+ *   @super_buf : ptr to the superbuf frame
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCameraPostProcessor::releaseSuperBuf(mm_camera_super_buf_t *super_buf)
+{
+    QCameraChannel *pChannel = NULL;
+
+    if (NULL != super_buf) {
+        pChannel = m_parent->getChannelByHandle(super_buf->ch_id);
+
+        if ( NULL == pChannel ) {
+            for (int8_t i = 0; i < mTotalNumReproc; i++) {
+                if ((mPPChannels[i] != NULL) &&
+                        (mPPChannels[i]->getMyHandle() == super_buf->ch_id)) {
+                    pChannel = mPPChannels[i];
+                    break;
+                }
+            }
+        }
+
+        if (pChannel != NULL) {
+            pChannel->bufDone(super_buf);
+        } else {
+            ALOGE(" %s : Channel id %d not found!!",
+                  __func__,
+                  super_buf->ch_id);
+        }
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseJpegJobData
+ *
+ * DESCRIPTION: function to release internal resources in jpeg job struct
+ *
+ * PARAMETERS :
+ *   @job     : ptr to jpeg job struct
+ *
+ * RETURN     : None
+ *
+ * NOTE       : original source frame needs to be queued back to kernel for
+ *              future use. Output buf of jpeg job needs to be released since
+ *              it's allocated for each job. Exif object needs to be deleted.
+ *==========================================================================*/
+void QCameraPostProcessor::releaseJpegJobData(qcamera_jpeg_data_t *job)
+{
+    CDBG("%s: E", __func__);
+    if (NULL != job) {
+        if (NULL != job->src_reproc_frame) {
+            if (!job->reproc_frame_release) {
+                releaseSuperBuf(job->src_reproc_frame);
+            }
+            free(job->src_reproc_frame);
+            job->src_reproc_frame = NULL;
+        }
+
+        if (NULL != job->src_frame) {
+            releaseSuperBuf(job->src_frame);
+            free(job->src_frame);
+            job->src_frame = NULL;
+        }
+
+        if (NULL != job->pJpegExifObj) {
+            delete job->pJpegExifObj;
+            job->pJpegExifObj = NULL;
+        }
+
+        if (NULL != job->src_reproc_bufs) {
+            delete [] job->src_reproc_bufs;
+        }
+
+    }
+    CDBG("%s: X", __func__);
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseSaveJobData
+ *
+ * DESCRIPTION: function to release internal resources in save jobs
+ *
+ * PARAMETERS :
+ *   @data      : ptr to jpeg event payload of the save job
+ *   @user_data : user data ptr (QCameraPostProcessor)
+ *
+ * RETURN     : None
+ *
+ *==========================================================================*/
+void QCameraPostProcessor::releaseSaveJobData(void *data, void *user_data)
+{
+    CDBG("%s: E", __func__);
+
+    QCameraPostProcessor *pme = (QCameraPostProcessor *) user_data;
+    if (NULL == pme) {
+        ALOGE("%s: Invalid postproc handle", __func__);
+        return;
+    }
+
+    qcamera_jpeg_evt_payload_t *job_data = (qcamera_jpeg_evt_payload_t *) data;
+    if (job_data == NULL) {
+        ALOGE("%s: Invalid jpeg event data", __func__);
+        return;
+    }
+
+    // find job by jobId
+    qcamera_jpeg_data_t *job = pme->findJpegJobByJobId(job_data->jobId);
+
+    if (NULL != job) {
+        pme->releaseJpegJobData(job);
+        free(job);
+    } else {
+        ALOGE("%s : Invalid jpeg job", __func__);
+    }
+
+    CDBG("%s: X", __func__);
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseRawData
+ *
+ * DESCRIPTION: callback function to release a raw frame superbuf
+ *
+ * PARAMETERS :
+ *   @data      : ptr to raw frame superbuf
+ *   @user_data : user data ptr (QCameraPostProcessor)
+ *
+ * RETURN     : None
+ *
+ *==========================================================================*/
+void QCameraPostProcessor::releaseRawData(void *data, void *user_data)
+{
+    CDBG("%s: E", __func__);
+
+    QCameraPostProcessor *pme = (QCameraPostProcessor *) user_data;
+    if (NULL == pme) {
+        ALOGE("%s: Invalid postproc handle", __func__);
+        return;
+    }
+    mm_camera_super_buf_t *super_buf = (mm_camera_super_buf_t *) data;
+    pme->releaseSuperBuf(super_buf);
+
+    CDBG("%s: X", __func__);
+}
+
+
+/*===========================================================================
+ * FUNCTION   : getColorfmtFromImgFmt
+ *
+ * DESCRIPTION: function to return jpeg color format based on its image format
+ *
+ * PARAMETERS :
+ *   @img_fmt : image format
+ *
+ * RETURN     : jpeg color format that can be understood by the omx lib
+ *==========================================================================*/
+mm_jpeg_color_format QCameraPostProcessor::getColorfmtFromImgFmt(cam_format_t img_fmt)
+{
+    switch (img_fmt) {
+    case CAM_FORMAT_YUV_420_NV21:
+        return MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V2;
+    case CAM_FORMAT_YUV_420_NV21_ADRENO:
+        return MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V2;
+    case CAM_FORMAT_YUV_420_NV12:
+        return MM_JPEG_COLOR_FORMAT_YCBCRLP_H2V2;
+    case CAM_FORMAT_YUV_420_YV12:
+        return MM_JPEG_COLOR_FORMAT_YCBCRLP_H2V2;
+    case CAM_FORMAT_YUV_422_NV61:
+        return MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V1;
+    case CAM_FORMAT_YUV_422_NV16:
+        return MM_JPEG_COLOR_FORMAT_YCBCRLP_H2V1;
+    default:
+        return MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V2;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getJpegImgTypeFromImgFmt
+ *
+ * DESCRIPTION: function to return jpeg encode image type based on its image format
+ *
+ * PARAMETERS :
+ *   @img_fmt : image format
+ *
+ * RETURN     : return jpeg source image format (YUV or Bitstream)
+ *==========================================================================*/
+mm_jpeg_format_t QCameraPostProcessor::getJpegImgTypeFromImgFmt(cam_format_t img_fmt)
+{
+    switch (img_fmt) {
+    case CAM_FORMAT_YUV_420_NV21:
+    case CAM_FORMAT_YUV_420_NV21_ADRENO:
+    case CAM_FORMAT_YUV_420_NV12:
+    case CAM_FORMAT_YUV_420_YV12:
+    case CAM_FORMAT_YUV_422_NV61:
+    case CAM_FORMAT_YUV_422_NV16:
+        return MM_JPEG_FMT_YUV;
+    default:
+        return MM_JPEG_FMT_YUV;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : queryStreams
+ *
+ * DESCRIPTION: utility method for retrieving main, thumbnail and reprocess
+ *              streams and frames from a bundled super buffer
+ *
+ * PARAMETERS :
+ *   @main    : ptr to main stream if present
+ *   @thumb   : ptr to thumbnail stream if present
+ *   @reproc  : ptr to reprocess stream if present
+ *   @main_image : ptr to main image if present
+ *   @thumb_image: ptr to thumbnail image if present
+ *   @frame   : bundled super buffer
+ *   @reproc_frame : bundled source frame buffer
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraPostProcessor::queryStreams(QCameraStream **main,
+        QCameraStream **thumb,
+        QCameraStream **reproc,
+        mm_camera_buf_def_t **main_image,
+        mm_camera_buf_def_t **thumb_image,
+        mm_camera_super_buf_t *frame,
+        mm_camera_super_buf_t *reproc_frame)
+{
+    if (NULL == frame) {
+        return NO_INIT;
+    }
+
+    QCameraChannel *pChannel = m_parent->getChannelByHandle(frame->ch_id);
+    // check reprocess channel if not found
+    if (pChannel == NULL) {
+        for (int8_t i = 0; i < mTotalNumReproc; i++) {
+            if ((mPPChannels[i] != NULL) &&
+                    (mPPChannels[i]->getMyHandle() == frame->ch_id)) {
+                pChannel = mPPChannels[i];
+                break;
+            }
+        }
+    }
+    if (pChannel == NULL) {
+        ALOGD("%s: No corresponding channel (ch_id = %d) exist, return here",
+              __func__, frame->ch_id);
+        return BAD_VALUE;
+    }
+
+    // Use snapshot stream to create thumbnail if snapshot and preview
+    // flip settings don't match in ZSL mode.
+    bool thumb_stream_needed = ((!m_parent->isZSLMode() ||
+        (m_parent->mParameters.getFlipMode(CAM_STREAM_TYPE_SNAPSHOT) ==
+         m_parent->mParameters.getFlipMode(CAM_STREAM_TYPE_PREVIEW))) &&
+        !m_parent->mParameters.generateThumbFromMain());
+
+    *main = *thumb = *reproc = NULL;
+    *main_image = *thumb_image = NULL;
+    // find snapshot frame and thumbnail frame
+    for (uint32_t i = 0; i < frame->num_bufs; i++) {
+        QCameraStream *pStream =
+                pChannel->getStreamByHandle(frame->bufs[i]->stream_id);
+        if (pStream != NULL) {
+            if (pStream->isTypeOf(CAM_STREAM_TYPE_SNAPSHOT) ||
+                    pStream->isOrignalTypeOf(CAM_STREAM_TYPE_SNAPSHOT) ||
+                    (m_parent->mParameters.getofflineRAW() &&
+                            pStream->isOrignalTypeOf(CAM_STREAM_TYPE_RAW))) {
+                *main = pStream;
+                *main_image = frame->bufs[i];
+            } else if (thumb_stream_needed &&
+                       (pStream->isTypeOf(CAM_STREAM_TYPE_PREVIEW) ||
+                        pStream->isTypeOf(CAM_STREAM_TYPE_POSTVIEW) ||
+                        pStream->isOrignalTypeOf(CAM_STREAM_TYPE_PREVIEW) ||
+                        pStream->isOrignalTypeOf(CAM_STREAM_TYPE_POSTVIEW))) {
+                *thumb = pStream;
+                *thumb_image = frame->bufs[i];
+            }
+            if (pStream->isTypeOf(CAM_STREAM_TYPE_OFFLINE_PROC) ) {
+                *reproc = pStream;
+            }
+        }
+    }
+
+    if (thumb_stream_needed && *thumb_image == NULL && reproc_frame != NULL) {
+        QCameraChannel *pSrcReprocChannel = NULL;
+        pSrcReprocChannel = m_parent->getChannelByHandle(reproc_frame->ch_id);
+        if (pSrcReprocChannel != NULL) {
+            // find thumbnail frame
+            for (uint32_t i = 0; i < reproc_frame->num_bufs; i++) {
+                QCameraStream *pStream =
+                        pSrcReprocChannel->getStreamByHandle(
+                                reproc_frame->bufs[i]->stream_id);
+                if (pStream != NULL) {
+                    if (pStream->isTypeOf(CAM_STREAM_TYPE_PREVIEW) ||
+                        pStream->isTypeOf(CAM_STREAM_TYPE_POSTVIEW)) {
+                        *thumb = pStream;
+                        *thumb_image = reproc_frame->bufs[i];
+                    }
+                }
+            }
+        }
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : syncStreamParams
+ *
+ * DESCRIPTION: Query the runtime parameters of all streams included
+ *              in the main and reprocessed frames
+ *
+ * PARAMETERS :
+ *   @frame : Main image super buffer
+ *   @reproc_frame : Image super buffer that got processed
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraPostProcessor::syncStreamParams(mm_camera_super_buf_t *frame,
+        mm_camera_super_buf_t *reproc_frame)
+{
+    QCameraStream *reproc_stream = NULL;
+    QCameraStream *main_stream = NULL;
+    QCameraStream *thumb_stream = NULL;
+    mm_camera_buf_def_t *main_frame = NULL;
+    mm_camera_buf_def_t *thumb_frame = NULL;
+    int32_t ret = NO_ERROR;
+
+    ret = queryStreams(&main_stream,
+            &thumb_stream,
+            &reproc_stream,
+            &main_frame,
+            &thumb_frame,
+            frame,
+            reproc_frame);
+    if (NO_ERROR != ret) {
+        ALOGE("%s : Camera streams query from input frames failed %d",
+                __func__,
+                ret);
+        return ret;
+    }
+
+    if (NULL != main_stream) {
+        ret = main_stream->syncRuntimeParams();
+        if (NO_ERROR != ret) {
+            ALOGE("%s : Syncing of main stream runtime parameters failed %d",
+                    __func__,
+                    ret);
+            return ret;
+        }
+    }
+
+    if (NULL != thumb_stream) {
+        ret = thumb_stream->syncRuntimeParams();
+        if (NO_ERROR != ret) {
+            ALOGE("%s : Syncing of thumb stream runtime parameters failed %d",
+                    __func__,
+                    ret);
+            return ret;
+        }
+    }
+
+    if ((NULL != reproc_stream) && (reproc_stream != main_stream)) {
+        ret = reproc_stream->syncRuntimeParams();
+        if (NO_ERROR != ret) {
+            ALOGE("%s : Syncing of reproc stream runtime parameters failed %d",
+                    __func__,
+                    ret);
+            return ret;
+        }
+    }
+
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : encodeData
+ *
+ * DESCRIPTION: function to prepare encoding job information and send to
+ *              mm-jpeg-interface to do the encoding job
+ *
+ * PARAMETERS :
+ *   @jpeg_job_data : ptr to a struct saving job related information
+ *   @needNewSess   : flag to indicate if a new jpeg encoding session needs
+ *                    to be created. After creation, this flag will be toggled
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraPostProcessor::encodeData(qcamera_jpeg_data_t *jpeg_job_data,
+                                         uint8_t &needNewSess)
+{
+    CDBG("%s : E", __func__);
+    int32_t ret = NO_ERROR;
+    mm_jpeg_job_t jpg_job;
+    uint32_t jobId = 0;
+    QCameraStream *reproc_stream = NULL;
+    QCameraStream *main_stream = NULL;
+    mm_camera_buf_def_t *main_frame = NULL;
+    QCameraStream *thumb_stream = NULL;
+    mm_camera_buf_def_t *thumb_frame = NULL;
+    mm_camera_super_buf_t *recvd_frame = jpeg_job_data->src_frame;
+    cam_rect_t crop;
+    cam_stream_parm_buffer_t param;
+    cam_stream_img_prop_t imgProp;
+
+    // find channel
+    QCameraChannel *pChannel = m_parent->getChannelByHandle(recvd_frame->ch_id);
+    // check reprocess channel if not found
+    if (pChannel == NULL) {
+        for (int8_t i = 0; i < mTotalNumReproc; i++) {
+            if ((mPPChannels[i] != NULL) &&
+                    (mPPChannels[i]->getMyHandle() == recvd_frame->ch_id)) {
+                pChannel = mPPChannels[i];
+                break;
+            }
+        }
+    }
+
+    if (pChannel == NULL) {
+        ALOGE("%s:%d] No corresponding channel (ch_id = %d) exist, return here",
+              __func__, __LINE__, recvd_frame->ch_id);
+        return BAD_VALUE;
+    }
+
+    const uint32_t jpeg_rotation = m_parent->mParameters.getJpegRotation();
+
+    ret = queryStreams(&main_stream,
+            &thumb_stream,
+            &reproc_stream,
+            &main_frame,
+            &thumb_frame,
+            recvd_frame,
+            jpeg_job_data->src_reproc_frame);
+    if (NO_ERROR != ret) {
+        return ret;
+    }
+
+    if(NULL == main_frame){
+       ALOGE("%s : Main frame is NULL", __func__);
+       return BAD_VALUE;
+    }
+
+    if(NULL == thumb_frame){
+       CDBG("%s : Thumbnail frame does not exist", __func__);
+    }
+
+    QCameraMemory *memObj = (QCameraMemory *)main_frame->mem_info;
+    if (NULL == memObj) {
+        ALOGE("%s : Memeory Obj of main frame is NULL", __func__);
+        return NO_MEMORY;
+    }
+
+    // dump snapshot frame if enabled
+    m_parent->dumpFrameToFile(main_stream, main_frame, QCAMERA_DUMP_FRM_SNAPSHOT);
+
+    // send upperlayer callback for raw image
+    camera_memory_t *mem = memObj->getMemory(main_frame->buf_idx, false);
+    if (NULL != m_parent->mDataCb &&
+        m_parent->msgTypeEnabledWithLock(CAMERA_MSG_RAW_IMAGE) > 0) {
+        qcamera_callback_argm_t cbArg;
+        memset(&cbArg, 0, sizeof(qcamera_callback_argm_t));
+        cbArg.cb_type = QCAMERA_DATA_CALLBACK;
+        cbArg.msg_type = CAMERA_MSG_RAW_IMAGE;
+        cbArg.data = mem;
+        cbArg.index = 1;
+        m_parent->m_cbNotifier.notifyCallback(cbArg);
+    }
+    if (NULL != m_parent->mNotifyCb &&
+        m_parent->msgTypeEnabledWithLock(CAMERA_MSG_RAW_IMAGE_NOTIFY) > 0) {
+        qcamera_callback_argm_t cbArg;
+        memset(&cbArg, 0, sizeof(qcamera_callback_argm_t));
+        cbArg.cb_type = QCAMERA_NOTIFY_CALLBACK;
+        cbArg.msg_type = CAMERA_MSG_RAW_IMAGE_NOTIFY;
+        cbArg.ext1 = 0;
+        cbArg.ext2 = 0;
+        m_parent->m_cbNotifier.notifyCallback(cbArg);
+    }
+
+    if (mJpegClientHandle <= 0) {
+        ALOGE("%s: Error: bug here, mJpegClientHandle is 0", __func__);
+        return UNKNOWN_ERROR;
+    }
+
+    if (needNewSess) {
+        // create jpeg encoding session
+        mm_jpeg_encode_params_t encodeParam;
+        memset(&encodeParam, 0, sizeof(mm_jpeg_encode_params_t));
+        ret = getJpegEncodingConfig(encodeParam, main_stream, thumb_stream);
+        if (ret != NO_ERROR) {
+            ALOGE("%s: error getting encoding config", __func__);
+            return ret;
+        }
+        CDBG_HIGH("[KPI Perf] %s : call jpeg create_session", __func__);
+        ret = mJpegHandle.create_session(mJpegClientHandle, &encodeParam, &mJpegSessionId);
+        if (ret != NO_ERROR) {
+            ALOGE("%s: error creating a new jpeg encoding session", __func__);
+            return ret;
+        }
+        needNewSess = FALSE;
+    }
+    // Fill in new job
+    memset(&jpg_job, 0, sizeof(mm_jpeg_job_t));
+    jpg_job.job_type = JPEG_JOB_TYPE_ENCODE;
+    jpg_job.encode_job.session_id = mJpegSessionId;
+    jpg_job.encode_job.src_index = (int32_t)main_frame->buf_idx;
+    jpg_job.encode_job.dst_index = 0;
+
+    if (mJpegMemOpt) {
+        jpg_job.encode_job.dst_index = jpg_job.encode_job.src_index;
+    } else if (mUseJpegBurst) {
+        jpg_job.encode_job.dst_index = -1;
+    }
+
+    cam_dimension_t src_dim;
+    memset(&src_dim, 0, sizeof(cam_dimension_t));
+    main_stream->getFrameDimension(src_dim);
+
+    bool hdr_output_crop = m_parent->mParameters.isHDROutputCropEnabled();
+    bool img_feature_enabled =
+            m_parent->mParameters.isUbiFocusEnabled() ||
+            m_parent->mParameters.isUbiRefocus() ||
+            m_parent->mParameters.isChromaFlashEnabled() ||
+            m_parent->mParameters.isOptiZoomEnabled() ||
+            m_parent->mParameters.isStillMoreEnabled();
+
+    CDBG_HIGH("%s:%d] Crop needed %d", __func__, __LINE__, img_feature_enabled);
+    crop.left = 0;
+    crop.top = 0;
+    crop.height = src_dim.height;
+    crop.width = src_dim.width;
+
+    param = main_stream->getOutputCrop();
+    for (int i = 0; i < param.outputCrop.num_of_streams; i++) {
+       if (param.outputCrop.crop_info[i].stream_id
+           == main_stream->getMyServerID()) {
+               crop = param.outputCrop.crop_info[i].crop;
+               main_stream->setCropInfo(crop);
+       }
+    }
+    if (img_feature_enabled) {
+        memset(&param, 0, sizeof(cam_stream_parm_buffer_t));
+
+        param = main_stream->getImgProp();
+        imgProp = param.imgProp;
+        main_stream->setCropInfo(imgProp.crop);
+        crop = imgProp.crop;
+        thumb_stream = NULL; /* use thumbnail from main image */
+
+        if ((reproc_stream != NULL) && (m_DataMem == NULL) &&
+                m_parent->mParameters.isUbiRefocus()) {
+
+            QCameraHeapMemory* miscBufHandler = reproc_stream->getMiscBuf();
+            cam_misc_buf_t* refocusResult =
+                    reinterpret_cast<cam_misc_buf_t *>(miscBufHandler->getPtr(0));
+            uint32_t resultSize = refocusResult->header_size +
+                    refocusResult->width * refocusResult->height;
+            camera_memory_t *dataMem = m_parent->mGetMemory(-1, resultSize,
+                    1, m_parent->mCallbackCookie);
+
+            CDBG_HIGH("%s:%d] Refocus result header %u dims %dx%d", __func__, __LINE__,
+                    resultSize, refocusResult->width, refocusResult->height);
+
+            if (dataMem && dataMem->data) {
+                memcpy(dataMem->data, refocusResult->data, resultSize);
+                //save mem pointer for depth map
+                m_DataMem = dataMem;
+            }
+        }
+    } else if ((reproc_stream != NULL) && (m_parent->mParameters.isTruePortraitEnabled())) {
+
+        QCameraHeapMemory* miscBufHandler = reproc_stream->getMiscBuf();
+        cam_misc_buf_t* tpResult =
+                reinterpret_cast<cam_misc_buf_t *>(miscBufHandler->getPtr(0));
+        uint32_t tpMetaSize = tpResult->header_size + tpResult->width * tpResult->height;
+
+        CDBG_HIGH("%s:%d] True portrait result header %d% dims dx%d", __func__, __LINE__,
+                tpMetaSize, tpResult->width, tpResult->height);
+
+        CAM_DUMP_TO_FILE(QCAMERA_DUMP_FRM_LOCATION"tp", "bm", -1, "y",
+                &tpResult->data, tpMetaSize);
+    }
+
+    cam_dimension_t dst_dim;
+
+    if (hdr_output_crop && crop.height) {
+        dst_dim.height = crop.height;
+    } else {
+        dst_dim.height = src_dim.height;
+    }
+    if (hdr_output_crop && crop.width) {
+        dst_dim.width = crop.width;
+    } else {
+        dst_dim.width = src_dim.width;
+    }
+
+    // main dim
+    jpg_job.encode_job.main_dim.src_dim = src_dim;
+    jpg_job.encode_job.main_dim.dst_dim = dst_dim;
+    jpg_job.encode_job.main_dim.crop = crop;
+
+    // get 3a sw version info
+    cam_q3a_version_t sw_version =
+        m_parent->getCamHalCapabilities()->q3a_version;
+
+    // get exif data
+    QCameraExif *pJpegExifObj = m_parent->getExifData();
+    jpeg_job_data->pJpegExifObj = pJpegExifObj;
+    if (pJpegExifObj != NULL) {
+        jpg_job.encode_job.exif_info.exif_data = pJpegExifObj->getEntries();
+        jpg_job.encode_job.exif_info.numOfEntries =
+            pJpegExifObj->getNumOfEntries();
+        jpg_job.encode_job.exif_info.debug_data.sw_3a_version[0] =
+            sw_version.major_version;
+        jpg_job.encode_job.exif_info.debug_data.sw_3a_version[1] =
+            sw_version.minor_version;
+        jpg_job.encode_job.exif_info.debug_data.sw_3a_version[2] =
+            sw_version.patch_version;
+        jpg_job.encode_job.exif_info.debug_data.sw_3a_version[3] =
+            sw_version.new_feature_des;
+    }
+
+    // set rotation only when no online rotation or offline pp rotation is done before
+    if (!m_parent->needRotationReprocess()) {
+        jpg_job.encode_job.rotation = jpeg_rotation;
+    }
+    CDBG_HIGH("%s: jpeg rotation is set to %d", __func__, jpg_job.encode_job.rotation);
+
+    // thumbnail dim
+    if (m_bThumbnailNeeded == TRUE) {
+        m_parent->getThumbnailSize(jpg_job.encode_job.thumb_dim.dst_dim);
+
+        if (thumb_stream == NULL) {
+            // need jpeg thumbnail, but no postview/preview stream exists
+            // we use the main stream/frame to encode thumbnail
+            thumb_stream = main_stream;
+            thumb_frame = main_frame;
+            if (m_parent->needRotationReprocess() &&
+                ((90 == jpeg_rotation) || (270 == jpeg_rotation))) {
+                // swap thumbnail dimensions
+                cam_dimension_t tmp_dim = jpg_job.encode_job.thumb_dim.dst_dim;
+                jpg_job.encode_job.thumb_dim.dst_dim.width = tmp_dim.height;
+                jpg_job.encode_job.thumb_dim.dst_dim.height = tmp_dim.width;
+            }
+        }
+
+        memset(&src_dim, 0, sizeof(cam_dimension_t));
+        thumb_stream->getFrameDimension(src_dim);
+        jpg_job.encode_job.thumb_dim.src_dim = src_dim;
+
+        // crop is the same if frame is the same
+        if (thumb_frame != main_frame) {
+            crop.left = 0;
+            crop.top = 0;
+            crop.height = src_dim.height;
+            crop.width = src_dim.width;
+
+            param = thumb_stream->getOutputCrop();
+            for (int i = 0; i < param.outputCrop.num_of_streams; i++) {
+               if (param.outputCrop.crop_info[i].stream_id
+                   == thumb_stream->getMyServerID()) {
+                       crop = param.outputCrop.crop_info[i].crop;
+                       thumb_stream->setCropInfo(crop);
+               }
+           }
+        }
+
+
+        jpg_job.encode_job.thumb_dim.crop = crop;
+        if (thumb_frame != NULL) {
+            jpg_job.encode_job.thumb_index = thumb_frame->buf_idx;
+        }
+        CDBG_HIGH("%s, thumbnail src w/h (%dx%d), dst w/h (%dx%d)", __func__,
+            jpg_job.encode_job.thumb_dim.src_dim.width,
+            jpg_job.encode_job.thumb_dim.src_dim.height,
+            jpg_job.encode_job.thumb_dim.dst_dim.width,
+            jpg_job.encode_job.thumb_dim.dst_dim.height);
+    }
+
+    if (thumb_frame != NULL) {
+        // dump thumbnail frame if enabled
+        m_parent->dumpFrameToFile(thumb_stream, thumb_frame, QCAMERA_DUMP_FRM_THUMBNAIL);
+    }
+
+    if (jpeg_job_data->metadata != NULL) {
+        // fill in meta data frame ptr
+        jpg_job.encode_job.p_metadata = jpeg_job_data->metadata;
+    }
+
+    jpg_job.encode_job.hal_version = CAM_HAL_V1;
+    m_parent->mExifParams.sensor_params.sens_type = m_parent->getSensorType();
+    jpg_job.encode_job.cam_exif_params = m_parent->mExifParams;
+    jpg_job.encode_job.mobicat_mask = m_parent->mParameters.getMobicatMask();
+
+
+    if (NULL != jpg_job.encode_job.p_metadata && (jpg_job.encode_job.mobicat_mask > 0)) {
+
+       /* Save a copy of mobicat params */
+       jpg_job.encode_job.p_metadata->is_mobicat_aec_params_valid =
+            jpg_job.encode_job.cam_exif_params.cam_3a_params_valid;
+
+       if (jpg_job.encode_job.cam_exif_params.cam_3a_params_valid) {
+            jpg_job.encode_job.p_metadata->mobicat_aec_params =
+                jpg_job.encode_job.cam_exif_params.cam_3a_params;
+       }
+
+       /* Save a copy of 3A debug params */
+        jpg_job.encode_job.p_metadata->is_statsdebug_ae_params_valid =
+            jpg_job.encode_job.cam_exif_params.ae_debug_params_valid;
+        jpg_job.encode_job.p_metadata->is_statsdebug_awb_params_valid =
+            jpg_job.encode_job.cam_exif_params.awb_debug_params_valid;
+        jpg_job.encode_job.p_metadata->is_statsdebug_af_params_valid =
+            jpg_job.encode_job.cam_exif_params.af_debug_params_valid;
+        jpg_job.encode_job.p_metadata->is_statsdebug_asd_params_valid =
+            jpg_job.encode_job.cam_exif_params.asd_debug_params_valid;
+        jpg_job.encode_job.p_metadata->is_statsdebug_stats_params_valid =
+            jpg_job.encode_job.cam_exif_params.stats_debug_params_valid;
+
+        if (jpg_job.encode_job.cam_exif_params.ae_debug_params_valid) {
+            jpg_job.encode_job.p_metadata->statsdebug_ae_data =
+                jpg_job.encode_job.cam_exif_params.ae_debug_params;
+        }
+        if (jpg_job.encode_job.cam_exif_params.awb_debug_params_valid) {
+            jpg_job.encode_job.p_metadata->statsdebug_awb_data =
+                jpg_job.encode_job.cam_exif_params.awb_debug_params;
+        }
+        if (jpg_job.encode_job.cam_exif_params.af_debug_params_valid) {
+            jpg_job.encode_job.p_metadata->statsdebug_af_data =
+                jpg_job.encode_job.cam_exif_params.af_debug_params;
+        }
+        if (jpg_job.encode_job.cam_exif_params.asd_debug_params_valid) {
+            jpg_job.encode_job.p_metadata->statsdebug_asd_data =
+                jpg_job.encode_job.cam_exif_params.asd_debug_params;
+        }
+        if (jpg_job.encode_job.cam_exif_params.stats_debug_params_valid) {
+            jpg_job.encode_job.p_metadata->statsdebug_stats_buffer_data =
+                jpg_job.encode_job.cam_exif_params.stats_debug_params;
+        }
+    }
+
+    /* Init the QTable */
+    for (int i = 0; i < QTABLE_MAX; i++) {
+        jpg_job.encode_job.qtable_set[i] = 0;
+    }
+
+    CDBG_HIGH("[KPI Perf] %s : PROFILE_JPEG_JOB_START", __func__);
+    ret = mJpegHandle.start_job(&jpg_job, &jobId);
+    if (ret == NO_ERROR) {
+        // remember job info
+        jpeg_job_data->jobId = jobId;
+    }
+
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : processRawImageImpl
+ *
+ * DESCRIPTION: function to send raw image to upper layer
+ *
+ * PARAMETERS :
+ *   @recvd_frame   : frame to be encoded
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraPostProcessor::processRawImageImpl(mm_camera_super_buf_t *recvd_frame)
+{
+    int32_t rc = NO_ERROR;
+
+    QCameraChannel *pChannel = m_parent->getChannelByHandle(recvd_frame->ch_id);
+    QCameraStream *pStream = NULL;
+    mm_camera_buf_def_t *frame = NULL;
+    // check reprocess channel if not found
+    if (pChannel == NULL) {
+        for (int8_t i = 0; i < mTotalNumReproc; i++) {
+            if ((mPPChannels[i] != NULL) &&
+                    (mPPChannels[i]->getMyHandle() == recvd_frame->ch_id)) {
+                pChannel = mPPChannels[i];
+                break;
+            }
+        }
+    }
+    if (pChannel == NULL) {
+        ALOGE("%s:%d] No corresponding channel (ch_id = %d) exist, return here",
+              __func__, __LINE__, recvd_frame->ch_id);
+        return BAD_VALUE;
+    }
+
+    // find snapshot frame
+    for (uint32_t i = 0; i < recvd_frame->num_bufs; i++) {
+        QCameraStream *pCurStream =
+            pChannel->getStreamByHandle(recvd_frame->bufs[i]->stream_id);
+        if (pCurStream != NULL) {
+            if (pCurStream->isTypeOf(CAM_STREAM_TYPE_SNAPSHOT) ||
+                pCurStream->isTypeOf(CAM_STREAM_TYPE_RAW) ||
+                pCurStream->isOrignalTypeOf(CAM_STREAM_TYPE_SNAPSHOT) ||
+                pCurStream->isOrignalTypeOf(CAM_STREAM_TYPE_RAW)) {
+                pStream = pCurStream;
+                frame = recvd_frame->bufs[i];
+                break;
+            }
+        }
+    }
+
+    if ( NULL == frame ) {
+        ALOGE("%s: No valid raw buffer", __func__);
+        return BAD_VALUE;
+    }
+
+    QCameraMemory *rawMemObj = (QCameraMemory *)frame->mem_info;
+    bool zslChannelUsed = m_parent->isZSLMode() &&
+            ( pChannel != mPPChannels[0] );
+    camera_memory_t *raw_mem = NULL;
+
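+    // For ZSL frames the existing buffer memory is handed to the callback
+    // directly; otherwise the raw data is copied into newly allocated
+    // callback memory.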
+    if (rawMemObj != NULL) {
+        if (zslChannelUsed) {
+            raw_mem = rawMemObj->getMemory(frame->buf_idx, false);
+        } else {
+            raw_mem = m_parent->mGetMemory(-1,
+                                           frame->frame_len,
+                                           1,
+                                           m_parent->mCallbackCookie);
+            if (NULL == raw_mem) {
+                ALOGE("%s : Not enough memory for RAW cb ", __func__);
+                return NO_MEMORY;
+            }
+            memcpy(raw_mem->data, frame->buffer, frame->frame_len);
+        }
+    }
+
+    if (NULL != rawMemObj && NULL != raw_mem) {
+        // dump frame into file
+        if (frame->stream_type == CAM_STREAM_TYPE_SNAPSHOT ||
+            pStream->isOrignalTypeOf(CAM_STREAM_TYPE_SNAPSHOT)) {
+            // for YUV422 NV16 case
+            m_parent->dumpFrameToFile(pStream, frame, QCAMERA_DUMP_FRM_SNAPSHOT);
+        } else {
+            //Received RAW snapshot taken notification
+            m_parent->dumpFrameToFile(pStream, frame, QCAMERA_DUMP_FRM_RAW);
+
+            if(true == m_parent->m_bIntRawEvtPending) {
+              //Sending RAW snapshot taken notification to HAL
+              memset(&m_dst_dim, 0, sizeof(m_dst_dim));
+              pStream->getFrameDimension(m_dst_dim);
+              pthread_mutex_lock(&m_parent->m_int_lock);
+              pthread_cond_signal(&m_parent->m_int_cond);
+              pthread_mutex_unlock(&m_parent->m_int_lock);
+              raw_mem->release(raw_mem);
+              return rc;
+            }
+        }
+
+        // send data callback / notify for RAW_IMAGE
+        if (NULL != m_parent->mDataCb &&
+            m_parent->msgTypeEnabledWithLock(CAMERA_MSG_RAW_IMAGE) > 0) {
+            qcamera_callback_argm_t cbArg;
+            memset(&cbArg, 0, sizeof(qcamera_callback_argm_t));
+            cbArg.cb_type = QCAMERA_DATA_CALLBACK;
+            cbArg.msg_type = CAMERA_MSG_RAW_IMAGE;
+            cbArg.data = raw_mem;
+            cbArg.index = 0;
+            m_parent->m_cbNotifier.notifyCallback(cbArg);
+        }
+        if (NULL != m_parent->mNotifyCb &&
+            m_parent->msgTypeEnabledWithLock(CAMERA_MSG_RAW_IMAGE_NOTIFY) > 0) {
+            qcamera_callback_argm_t cbArg;
+            memset(&cbArg, 0, sizeof(qcamera_callback_argm_t));
+            cbArg.cb_type = QCAMERA_NOTIFY_CALLBACK;
+            cbArg.msg_type = CAMERA_MSG_RAW_IMAGE_NOTIFY;
+            cbArg.ext1 = 0;
+            cbArg.ext2 = 0;
+            m_parent->m_cbNotifier.notifyCallback(cbArg);
+        }
+
+        if ((m_parent->mDataCb != NULL) &&
+            m_parent->msgTypeEnabledWithLock(CAMERA_MSG_COMPRESSED_IMAGE) > 0) {
+            qcamera_release_data_t release_data;
+            memset(&release_data, 0, sizeof(qcamera_release_data_t));
+            if ( zslChannelUsed ) {
+                release_data.frame = recvd_frame;
+            } else {
+                release_data.data = raw_mem;
+            }
+            rc = sendDataNotify(CAMERA_MSG_COMPRESSED_IMAGE,
+                                raw_mem,
+                                0,
+                                NULL,
+                                &release_data);
+        } else {
+            raw_mem->release(raw_mem);
+        }
+    } else {
+        ALOGE("%s: Cannot get raw mem", __func__);
+        rc = UNKNOWN_ERROR;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : dataSaveRoutine
+ *
+ * DESCRIPTION: data saving routine
+ *
+ * PARAMETERS :
+ *   @data    : user data ptr (QCameraPostProcessor)
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void *QCameraPostProcessor::dataSaveRoutine(void *data)
+{
+    int running = 1;
+    int ret;
+    uint8_t is_active = FALSE;
+    QCameraPostProcessor *pme = (QCameraPostProcessor *)data;
+    QCameraCmdThread *cmdThread = &pme->m_saveProcTh;
+    cmdThread->setName("CAM_JpegSave");
+    char saveName[PROPERTY_VALUE_MAX];
+
+    CDBG_HIGH("%s: E", __func__);
+    do {
+        do {
+            ret = cam_sem_wait(&cmdThread->cmd_sem);
+            if (ret != 0 && errno != EINVAL) {
+                ALOGE("%s: cam_sem_wait error (%s)",
+                           __func__, strerror(errno));
+                return NULL;
+            }
+        } while (ret != 0);
+
+        // we got notified about new cmd avail in cmd queue
+        camera_cmd_type_t cmd = cmdThread->getCmd();
+        switch (cmd) {
+        case CAMERA_CMD_TYPE_START_DATA_PROC:
+            CDBG_HIGH("%s: start data proc", __func__);
+            is_active = TRUE;
+            pme->m_inputSaveQ.init();
+            break;
+        case CAMERA_CMD_TYPE_STOP_DATA_PROC:
+            {
+                CDBG_HIGH("%s: stop data proc", __func__);
+                is_active = FALSE;
+
+                // flush input save Queue
+                pme->m_inputSaveQ.flush();
+
+                // signal cmd is completed
+                cam_sem_post(&cmdThread->sync_sem);
+            }
+            break;
+        case CAMERA_CMD_TYPE_DO_NEXT_JOB:
+            {
+                CDBG_HIGH("%s: Do next job, active is %d", __func__, is_active);
+
+                qcamera_jpeg_evt_payload_t *job_data = (qcamera_jpeg_evt_payload_t *) pme->m_inputSaveQ.dequeue();
+                if (job_data == NULL) {
+                    ALOGE("%s: Invalid jpeg event data", __func__);
+                    continue;
+                }
+
+                pme->m_ongoingJpegQ.flushNodes(matchJobId, (void*)&job_data->jobId);
+
+                CDBG_HIGH("[KPI Perf] %s : jpeg job %d", __func__, job_data->jobId);
+
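+                // Write the encoded JPEG to STORE_LOCATION and report the
+                // resulting file path (not the image payload) to the upper
+                // layer through the compressed-image data callback.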
+                if (is_active == TRUE) {
+                    memset(saveName, '\0', sizeof(saveName));
+                    snprintf(saveName,
+                             sizeof(saveName),
+                             QCameraPostProcessor::STORE_LOCATION,
+                             pme->mSaveFrmCnt);
+
+                    int file_fd = open(saveName, O_RDWR | O_CREAT, 0655);
+                    if (file_fd >= 0) {
+                        ssize_t written_len = write(file_fd, job_data->out_data.buf_vaddr,
+                                job_data->out_data.buf_filled_len);
+                        if ((ssize_t)job_data->out_data.buf_filled_len != written_len) {
+                            ALOGE("%s: Failed to save complete data: %zd bytes "
+                                  "written instead of %d bytes!",
+                                  __func__, written_len,
+                                  job_data->out_data.buf_filled_len);
+                        } else {
+                            CDBG_HIGH("%s: wrote %zd bytes\n",
+                                __func__, written_len);
+                        }
+
+                        close(file_fd);
+                    } else {
+                        ALOGE("%s: failed to open file for saving", __func__);
+                    }
+                    pme->mSaveFrmCnt++;
+
+                    camera_memory_t* jpeg_mem = pme->m_parent->mGetMemory(-1,
+                                                         strlen(saveName),
+                                                         1,
+                                                         pme->m_parent->mCallbackCookie);
+                    if (NULL == jpeg_mem) {
+                        ret = NO_MEMORY;
+                        ALOGE("%s : getMemory for jpeg, ret = NO_MEMORY", __func__);
+                        goto end;
+                    }
+                    memcpy(jpeg_mem->data, saveName, strlen(saveName));
+
+                    CDBG_HIGH("%s : Calling upperlayer callback to store JPEG image", __func__);
+                    qcamera_release_data_t release_data;
+                    memset(&release_data, 0, sizeof(qcamera_release_data_t));
+                    release_data.data = jpeg_mem;
+                    release_data.unlinkFile = true;
+                    CDBG_HIGH("[KPI Perf] %s: PROFILE_JPEG_CB ",__func__);
+                    ret = pme->sendDataNotify(CAMERA_MSG_COMPRESSED_IMAGE,
+                                        jpeg_mem,
+                                        0,
+                                        NULL,
+                                        &release_data);
+                }
+
+end:
+                free(job_data);
+            }
+            break;
+        case CAMERA_CMD_TYPE_EXIT:
+            CDBG_HIGH("%s : save thread exit", __func__);
+            running = 0;
+            break;
+        default:
+            break;
+        }
+    } while (running);
+    CDBG_HIGH("%s: X", __func__);
+    return NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : dataProcessRoutine
+ *
+ * DESCRIPTION: data process routine that handles input data either from input
+ *              Jpeg Queue to do jpeg encoding, or from input PP Queue to do
+ *              reprocess.
+ *
+ * PARAMETERS :
+ *   @data    : user data ptr (QCameraPostProcessor)
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void *QCameraPostProcessor::dataProcessRoutine(void *data)
+{
+    int running = 1;
+    int ret;
+    uint8_t is_active = FALSE;
+    QCameraPostProcessor *pme = (QCameraPostProcessor *)data;
+    QCameraCmdThread *cmdThread = &pme->m_dataProcTh;
+    cmdThread->setName("CAM_JpegProc");
+
+    CDBG_HIGH("%s: E", __func__);
+    do {
+        do {
+            ret = cam_sem_wait(&cmdThread->cmd_sem);
+            if (ret != 0 && errno != EINVAL) {
+                ALOGE("%s: cam_sem_wait error (%s)",
+                           __func__, strerror(errno));
+                return NULL;
+            }
+        } while (ret != 0);
+
+        // we got notified about new cmd avail in cmd queue
+        camera_cmd_type_t cmd = cmdThread->getCmd();
+        switch (cmd) {
+        case CAMERA_CMD_TYPE_START_DATA_PROC:
+            CDBG_HIGH("%s: start data proc", __func__);
+            is_active = TRUE;
+
+            pme->m_ongoingPPQ.init();
+            pme->m_inputJpegQ.init();
+            pme->m_inputPPQ.init();
+            pme->m_inputRawQ.init();
+
+            pme->m_saveProcTh.sendCmd(CAMERA_CMD_TYPE_START_DATA_PROC,
+                                      FALSE,
+                                      FALSE);
+
+            // signal cmd is completed
+            cam_sem_post(&cmdThread->sync_sem);
+
+            break;
+        case CAMERA_CMD_TYPE_STOP_DATA_PROC:
+            {
+                CDBG_HIGH("%s: stop data proc", __func__);
+                is_active = FALSE;
+
+                pme->m_saveProcTh.sendCmd(CAMERA_CMD_TYPE_STOP_DATA_PROC,
+                                           TRUE,
+                                           TRUE);
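+                // Stop the save thread before aborting the in-flight encode
+                // jobs below, so no further stored-JPEG work gets queued in
+                // the meantime.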
+                // cancel all ongoing jpeg jobs
+                qcamera_jpeg_data_t *jpeg_job =
+                    (qcamera_jpeg_data_t *)pme->m_ongoingJpegQ.dequeue();
+                while (jpeg_job != NULL) {
+                    pme->mJpegHandle.abort_job(jpeg_job->jobId);
+
+                    pme->releaseJpegJobData(jpeg_job);
+                    free(jpeg_job);
+
+                    jpeg_job = (qcamera_jpeg_data_t *)pme->m_ongoingJpegQ.dequeue();
+                }
+
+                // destroy jpeg encoding session
+                if ( 0 < pme->mJpegSessionId ) {
+                    pme->mJpegHandle.destroy_session(pme->mJpegSessionId);
+                    pme->mJpegSessionId = 0;
+                }
+
+                // free jpeg out buf and exif obj
+                FREE_JPEG_OUTPUT_BUFFER(pme->m_pJpegOutputMem,
+                    pme->m_JpegOutputMemCount);
+
+                if (pme->m_pJpegExifObj != NULL) {
+                    delete pme->m_pJpegExifObj;
+                    pme->m_pJpegExifObj = NULL;
+                }
+
+                // flush ongoing postproc Queue
+                pme->m_ongoingPPQ.flush();
+
+                // flush input jpeg Queue
+                pme->m_inputJpegQ.flush();
+
+                // flush input Postproc Queue
+                pme->m_inputPPQ.flush();
+
+                // flush input raw Queue
+                pme->m_inputRawQ.flush();
+
+                // signal cmd is completed
+                cam_sem_post(&cmdThread->sync_sem);
+
+                pme->mNewJpegSessionNeeded = true;
+            }
+            break;
+        case CAMERA_CMD_TYPE_DO_NEXT_JOB:
+            {
+                CDBG_HIGH("%s: Do next job, active is %d", __func__, is_active);
+                if (is_active == TRUE) {
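+                    // Processing order: drain one pending JPEG job, then any
+                    // queued raw frame, then trigger reprocessing; capture is
+                    // stopped only after the reprocess request succeeds.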
+                    qcamera_jpeg_data_t *jpeg_job =
+                        (qcamera_jpeg_data_t *)pme->m_inputJpegQ.dequeue();
+
+                    if (NULL != jpeg_job) {
+                        // To avoid any race conditions,
+                        // sync any stream specific parameters here.
+                        pme->syncStreamParams(jpeg_job->src_frame, NULL);
+
+                        // add into ongoing jpeg job Q
+                        if (pme->m_ongoingJpegQ.enqueue((void *)jpeg_job)) {
+                            ret = pme->encodeData(jpeg_job,
+                                      pme->mNewJpegSessionNeeded);
+                            if (NO_ERROR != ret) {
+                                // dequeue the last one
+                                pme->m_ongoingJpegQ.dequeue(false);
+                                pme->releaseJpegJobData(jpeg_job);
+                                free(jpeg_job);
+                                jpeg_job = NULL;
+                                pme->sendEvtNotify(CAMERA_MSG_ERROR, UNKNOWN_ERROR, 0);
+                            }
+                        } else {
+                            CDBG_HIGH("%s : m_ongoingJpegQ is not active!!!", __func__);
+                            pme->releaseJpegJobData(jpeg_job);
+                            free(jpeg_job);
+                            jpeg_job = NULL;
+                        }
+                    }
+
+
+                    // process raw data if any
+                    mm_camera_super_buf_t *super_buf =
+                        (mm_camera_super_buf_t *)pme->m_inputRawQ.dequeue();
+
+                    if (NULL != super_buf) {
+                        //play shutter sound
+                        pme->m_parent->playShutter();
+                        ret = pme->processRawImageImpl(super_buf);
+                        if (NO_ERROR != ret) {
+                            pme->releaseSuperBuf(super_buf);
+                            free(super_buf);
+                            pme->sendEvtNotify(CAMERA_MSG_ERROR, UNKNOWN_ERROR, 0);
+                        }
+                    }
+
+                    ret = pme->doReprocess();
+                    if (NO_ERROR != ret) {
+                        pme->sendEvtNotify(CAMERA_MSG_ERROR, UNKNOWN_ERROR, 0);
+                    } else {
+                        ret = pme->stopCapture();
+                    }
+
+                } else {
+                    // not active, simply return buf and do no op
+                    qcamera_jpeg_data_t *jpeg_data =
+                        (qcamera_jpeg_data_t *)pme->m_inputJpegQ.dequeue();
+                    if (NULL != jpeg_data) {
+                        pme->releaseJpegJobData(jpeg_data);
+                        free(jpeg_data);
+                    }
+                    mm_camera_super_buf_t *super_buf =
+                        (mm_camera_super_buf_t *)pme->m_inputRawQ.dequeue();
+                    if (NULL != super_buf) {
+                        pme->releaseSuperBuf(super_buf);
+                        free(super_buf);
+                    }
+
+                    // flush input Postproc Queue
+                    pme->m_inputPPQ.flush();
+                }
+            }
+            break;
+        case CAMERA_CMD_TYPE_EXIT:
+            running = 0;
+            break;
+        default:
+            break;
+        }
+    } while (running);
+    CDBG_HIGH("%s: X", __func__);
+    return NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : doReprocess
+ *
+ * DESCRIPTION: Trigger channel reprocessing
+ *
+ * PARAMETERS :None
+ *
+ * RETURN     : int32_t type of status
+ *                    NO_ERROR  -- success
+ *                    non-zero failure code
+ *==========================================================================*/
+int32_t QCameraPostProcessor::doReprocess()
+{
+    int32_t ret = NO_ERROR;
+    QCameraChannel *m_pSrcChannel;
+    QCameraStream *pMetaStream = NULL;
+    uint8_t meta_buf_index = 0;
+    mm_camera_buf_def_t *meta_buf = NULL;
+    bool found_meta = FALSE;
+
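+    // Peek at the head of the input post-proc queue first; the entry is only
+    // dequeued once validatePostProcess() confirms the frame can be handled.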
+    qcamera_pp_request_t *ppreq_job = (qcamera_pp_request_t *)m_inputPPQ.peek();
+    if ((ppreq_job == NULL) || (ppreq_job->src_frame == NULL)) {
+        return ret;
+    }
+
+    if (!validatePostProcess(ppreq_job->src_frame)) {
+        return ret;
+    }
+
+    ppreq_job = (qcamera_pp_request_t *)m_inputPPQ.dequeue();
+    if (ppreq_job == NULL || ppreq_job->src_frame == NULL ||
+            ppreq_job->src_reproc_frame == NULL) {
+        return ret;
+    }
+
+    mm_camera_super_buf_t *src_frame = ppreq_job->src_frame;
+    mm_camera_super_buf_t *src_reproc_frame = ppreq_job->src_reproc_frame;
+    int8_t mCurReprocCount = ppreq_job->reprocCount;
+
+    CDBG("%s: frame = %p src_frame = %p mCurReprocCount = %d",__func__,
+            src_frame,src_reproc_frame,mCurReprocCount);
+
+    // find meta data stream and index of meta data frame in the superbuf
+    for (int8_t j = 0; j < mTotalNumReproc; j++) {
+        uint32_t i;
+        m_pSrcChannel = mPPChannels[j]->getSrcChannel();
+        if (m_pSrcChannel == NULL)
+            continue;
+        for (i = 0; i < src_reproc_frame->num_bufs; i++) {
+            QCameraStream *pStream =
+                    m_pSrcChannel->getStreamByHandle(src_reproc_frame->bufs[i]->stream_id);
+            if (pStream != NULL && pStream->isTypeOf(CAM_STREAM_TYPE_METADATA)) {
+                meta_buf_index = (uint8_t) src_reproc_frame->bufs[i]->buf_idx;
+                pMetaStream = pStream;
+                meta_buf = src_reproc_frame->bufs[i];
+                break;
+            }
+        }
+        if (i < src_reproc_frame->num_bufs) {
+            CDBG(" %s: Found Meta data info for reprocessing index = %d", __func__,
+                    (int)meta_buf_index);
+            break;
+        }
+    }
+
+    qcamera_pp_data_t *pp_job =
+            (qcamera_pp_data_t *)malloc(sizeof(qcamera_pp_data_t));
+    if (pp_job != NULL) {
+        syncStreamParams(src_frame, src_reproc_frame);
+        memset(pp_job, 0, sizeof(qcamera_pp_data_t));
+        if (mPPChannels[mCurReprocCount] != NULL) {
+            // add into ongoing PP job Q
+            pp_job->src_frame = src_frame;
+            pp_job->src_reproc_frame = src_reproc_frame;
+            pp_job->reprocCount = (int8_t) (mCurReprocCount + 1);
+
+            if (m_parent->isRegularCapture()) {
+                if ((NULL != pp_job->src_frame) &&
+                    (0 < pp_job->src_frame->num_bufs)) {
+                    mm_camera_buf_def_t *bufs = NULL;
+                    uint32_t num_bufs = pp_job->src_frame->num_bufs;
+                    bufs = new mm_camera_buf_def_t[num_bufs];
+                    if (NULL == bufs) {
+                        ALOGE("%s:Unable to allocate cached buffers",
+                            __func__);
+                        return NO_MEMORY;
+                    }
+
+                    for (uint32_t i = 0; i < num_bufs; i++) {
+                        bufs[i] = *pp_job->src_frame->bufs[i];
+                        pp_job->src_frame->bufs[i] = &bufs[i];
+                    }
+                    pp_job->src_reproc_bufs = bufs;
+                }
+
+                m_bufCountPPQ++;
+
+                // Don't release source frame after encoding
+                // at this point the source channel will not exist.
+                pp_job->reproc_frame_release = true;
+                if (m_ongoingPPQ.enqueue((void *)pp_job)) {
+                    ret = mPPChannels[mCurReprocCount]->doReprocessOffline(pp_job->src_frame,
+                            meta_buf);
+                } else {
+                    CDBG_HIGH("%s : m_ongoingPPQ is not active!!!", __func__);
+                    releaseOngoingPPData(pp_job, this);
+                    free(pp_job);
+                    pp_job = NULL;
+                }
+            } else {
+
+                m_bufCountPPQ++;
+                if (!m_ongoingPPQ.enqueue((void *)pp_job)) {
+                    CDBG_HIGH("%s : m_ongoingPPQ is not active!!!", __func__);
+                    releaseOngoingPPData(pp_job, this);
+                    free(pp_job);
+                    pp_job = NULL;
+                    goto end;
+                }
+
+                int32_t numRequiredPPQBufsForSingleOutput = (int32_t)
+                        m_parent->mParameters.getNumberInBufsForSingleShot();
+
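+                // When a single shot is expected to produce more output buffers
+                // than input buffers, enqueue empty placeholder pp jobs so the
+                // ongoing queue has an entry for every expected output frame.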
+                if (m_bufCountPPQ % numRequiredPPQBufsForSingleOutput == 0) {
+                    int32_t extra_pp_job_count =
+                            m_parent->mParameters.getNumberOutBufsForSingleShot() -
+                            m_parent->mParameters.getNumberInBufsForSingleShot();
+
+                    for (int32_t i = 0; i < extra_pp_job_count; i++) {
+                        qcamera_pp_data_t *extra_pp_job =
+                                (qcamera_pp_data_t *)calloc(1, sizeof(qcamera_pp_data_t));
+                        if (!extra_pp_job) {
+                            ALOGE("%s: no mem for qcamera_pp_data_t", __func__);
+                            ret = NO_MEMORY;
+                            break;
+                        }
+                        extra_pp_job->reprocCount = pp_job->reprocCount;
+                        if (!m_ongoingPPQ.enqueue((void *)extra_pp_job)) {
+                            CDBG_HIGH("%s : m_ongoingPPQ is not active!!!", __func__);
+                            releaseOngoingPPData(extra_pp_job, this);
+                            free(extra_pp_job);
+                            extra_pp_job = NULL;
+                            goto end;
+                        }
+                    }
+                }
+
+                ret = mPPChannels[mCurReprocCount]->doReprocess(pp_job->src_frame,
+                        m_parent->mParameters, pMetaStream, meta_buf_index);
+            }
+        } else {
+            ALOGE("%s: Reprocess channel is NULL", __func__);
+            if (pp_job != NULL) {
+                free(pp_job);
+                pp_job = NULL;
+            }
+            releasePPInputData(ppreq_job, this);
+            ret = UNKNOWN_ERROR;
+        }
+    } else {
+        ALOGE("%s: no mem for qcamera_pp_data_t", __func__);
+        ret = NO_MEMORY;
+    }
+
+end:
+    free(ppreq_job);
+    ppreq_job = NULL;
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : getReprocChannel
+ *
+ * DESCRIPTION:  Returns reprocessing channel handle
+ *
+ * PARAMETERS :
+ *   @index   : index into the reprocessing channel array
+ *
+ * RETURN     : QCameraReprocessChannel * type of pointer
+ *              NULL if no reprocessing channel
+ *==========================================================================*/
+QCameraReprocessChannel * QCameraPostProcessor::getReprocChannel(uint8_t index)
+{
+    if (index >= mTotalNumReproc) {
+        ALOGE("%s: Invalid index value",__func__);
+        return NULL;
+    }
+    return mPPChannels[index];
+}
+
+/*===========================================================================
+ * FUNCTION   : stopCapture
+ *
+ * DESCRIPTION: Trigger image capture stop
+ *
+ * PARAMETERS :
+ * None
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraPostProcessor::stopCapture()
+{
+     int rc = NO_ERROR;
+
+     if (m_parent->isRegularCapture()) {
+        rc = m_parent->processAPI(
+                        QCAMERA_SM_EVT_STOP_CAPTURE_CHANNEL,
+                        NULL);
+     }
+
+     return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : getJpegPaddingReq
+ *
+ * DESCRIPTION: function to query the JPEG-specific padding requirement
+ *
+ * PARAMETERS :
+ *   @padding_info : jpeg specific padding requirement
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraPostProcessor::getJpegPaddingReq(cam_padding_info_t &padding_info)
+{
+    // TODO: hardcoded for now; should be queried from mm-jpeg-interface
+    padding_info.width_padding  = CAM_PAD_NONE;
+    padding_info.height_padding  = CAM_PAD_TO_16;
+    padding_info.plane_padding  = CAM_PAD_TO_WORD;
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setYUVFrameInfo
+ *
+ * DESCRIPTION: set Raw YUV frame data info for up-layer
+ *
+ * PARAMETERS :
+ *   @frame   : process frame received from mm-camera-interface
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ * NOTE       : currently we return frame len, y offset, cbcr offset and frame format
+ *==========================================================================*/
+int32_t QCameraPostProcessor::setYUVFrameInfo(mm_camera_super_buf_t *recvd_frame)
+{
+    QCameraChannel *pChannel = m_parent->getChannelByHandle(recvd_frame->ch_id);
+    // check reprocess channel if not found
+    if (pChannel == NULL) {
+        for (int8_t i = 0; i < mTotalNumReproc; i++) {
+            if ((mPPChannels[i] != NULL) &&
+                    (mPPChannels[i]->getMyHandle() == recvd_frame->ch_id)) {
+                pChannel = mPPChannels[i];
+                break;
+            }
+        }
+    }
+
+    if (pChannel == NULL) {
+        ALOGE("%s:%d] No corresponding channel (ch_id = %d) exist, return here",
+              __func__, __LINE__, recvd_frame->ch_id);
+        return BAD_VALUE;
+    }
+
+    // find snapshot frame
+    for (uint32_t i = 0; i < recvd_frame->num_bufs; i++) {
+        QCameraStream *pStream =
+            pChannel->getStreamByHandle(recvd_frame->bufs[i]->stream_id);
+        if (pStream != NULL) {
+            if (pStream->isTypeOf(CAM_STREAM_TYPE_SNAPSHOT) ||
+                pStream->isOrignalTypeOf(CAM_STREAM_TYPE_SNAPSHOT)) {
+                //get the main frame, use stream info
+                cam_frame_len_offset_t frame_offset;
+                cam_dimension_t frame_dim;
+                cam_format_t frame_fmt;
+                const char *fmt_string;
+                pStream->getFrameDimension(frame_dim);
+                pStream->getFrameOffset(frame_offset);
+                pStream->getFormat(frame_fmt);
+                fmt_string = m_parent->mParameters.getFrameFmtString(frame_fmt);
+
+                int cbcr_offset = (int32_t)frame_offset.mp[0].len -
+                        frame_dim.width * frame_dim.height;
+                m_parent->mParameters.set("snapshot-framelen", (int)frame_offset.frame_len);
+                m_parent->mParameters.set("snapshot-yoff", (int)frame_offset.mp[0].offset);
+                m_parent->mParameters.set("snapshot-cbcroff", cbcr_offset);
+                if (fmt_string != NULL) {
+                    m_parent->mParameters.set("snapshot-format", fmt_string);
+                } else {
+                    m_parent->mParameters.set("snapshot-format", "");
+                }
+
+                CDBG_HIGH("%s: frame width=%d, height=%d, yoff=%d, cbcroff=%d, fmt_string=%s", __func__,
+                        frame_dim.width, frame_dim.height, frame_offset.mp[0].offset, cbcr_offset, fmt_string);
+                return NO_ERROR;
+            }
+        }
+    }
+
+    return BAD_VALUE;
+}
+
+bool QCameraPostProcessor::matchJobId(void *data, void *, void *match_data)
+{
+  qcamera_jpeg_data_t * job = (qcamera_jpeg_data_t *) data;
+  uint32_t job_id = *((uint32_t *) match_data);
+  return job->jobId == job_id;
+}
+
+/*===========================================================================
+ * FUNCTION   : getJpegMemory
+ *
+ * DESCRIPTION: buffer allocation function
+ *   to pass to jpeg interface
+ *
+ * PARAMETERS :
+ *   @out_buf : buffer descriptor struct
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCameraPostProcessor::getJpegMemory(omx_jpeg_ouput_buf_t *out_buf)
+{
+    CDBG_HIGH("%s: Allocating jpeg out buffer of size: %d", __func__, out_buf->size);
+    QCameraPostProcessor *procInst = (QCameraPostProcessor *) out_buf->handle;
+    camera_memory_t *cam_mem = procInst->m_parent->mGetMemory(out_buf->fd, out_buf->size, 1U,
+            procInst->m_parent->mCallbackCookie);
+    out_buf->mem_hdl = cam_mem;
+    out_buf->vaddr = cam_mem->data;
+
+    return 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : QCameraExif
+ *
+ * DESCRIPTION: constructor of QCameraExif
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraExif::QCameraExif()
+    : m_nNumEntries(0)
+{
+    memset(m_Entries, 0, sizeof(m_Entries));
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCameraExif
+ *
+ * DESCRIPTION: destructor of QCameraExif. Releases internally allocated memory.
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraExif::~QCameraExif()
+{
+    for (uint32_t i = 0; i < m_nNumEntries; i++) {
+        switch (m_Entries[i].tag_entry.type) {
+        case EXIF_BYTE:
+            {
+                if (m_Entries[i].tag_entry.count > 1 &&
+                    m_Entries[i].tag_entry.data._bytes != NULL) {
+                    free(m_Entries[i].tag_entry.data._bytes);
+                    m_Entries[i].tag_entry.data._bytes = NULL;
+                }
+            }
+            break;
+        case EXIF_ASCII:
+            {
+                if (m_Entries[i].tag_entry.data._ascii != NULL) {
+                    free(m_Entries[i].tag_entry.data._ascii);
+                    m_Entries[i].tag_entry.data._ascii = NULL;
+                }
+            }
+            break;
+        case EXIF_SHORT:
+            {
+                if (m_Entries[i].tag_entry.count > 1 &&
+                    m_Entries[i].tag_entry.data._shorts != NULL) {
+                    free(m_Entries[i].tag_entry.data._shorts);
+                    m_Entries[i].tag_entry.data._shorts = NULL;
+                }
+            }
+            break;
+        case EXIF_LONG:
+            {
+                if (m_Entries[i].tag_entry.count > 1 &&
+                    m_Entries[i].tag_entry.data._longs != NULL) {
+                    free(m_Entries[i].tag_entry.data._longs);
+                    m_Entries[i].tag_entry.data._longs = NULL;
+                }
+            }
+            break;
+        case EXIF_RATIONAL:
+            {
+                if (m_Entries[i].tag_entry.count > 1 &&
+                    m_Entries[i].tag_entry.data._rats != NULL) {
+                    free(m_Entries[i].tag_entry.data._rats);
+                    m_Entries[i].tag_entry.data._rats = NULL;
+                }
+            }
+            break;
+        case EXIF_UNDEFINED:
+            {
+                if (m_Entries[i].tag_entry.data._undefined != NULL) {
+                    free(m_Entries[i].tag_entry.data._undefined);
+                    m_Entries[i].tag_entry.data._undefined = NULL;
+                }
+            }
+            break;
+        case EXIF_SLONG:
+            {
+                if (m_Entries[i].tag_entry.count > 1 &&
+                    m_Entries[i].tag_entry.data._slongs != NULL) {
+                    free(m_Entries[i].tag_entry.data._slongs);
+                    m_Entries[i].tag_entry.data._slongs = NULL;
+                }
+            }
+            break;
+        case EXIF_SRATIONAL:
+            {
+                if (m_Entries[i].tag_entry.count > 1 &&
+                    m_Entries[i].tag_entry.data._srats != NULL) {
+                    free(m_Entries[i].tag_entry.data._srats);
+                    m_Entries[i].tag_entry.data._srats = NULL;
+                }
+            }
+            break;
+        }
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : addEntry
+ *
+ * DESCRIPTION: function to add an entry to exif data
+ *
+ * PARAMETERS :
+ *   @tagid   : exif tag ID
+ *   @type    : data type
+ *   @count   : number of data elements of the given type
+ *   @data    : input data ptr
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraExif::addEntry(exif_tag_id_t tagid,
+                              exif_tag_type_t type,
+                              uint32_t count,
+                              void *data)
+{
+    int32_t rc = NO_ERROR;
+    if(m_nNumEntries >= MAX_EXIF_TABLE_ENTRIES) {
+        ALOGE("%s: Number of entries exceeded limit", __func__);
+        return NO_MEMORY;
+    }
+
+    m_Entries[m_nNumEntries].tag_id = tagid;
+    m_Entries[m_nNumEntries].tag_entry.type = type;
+    m_Entries[m_nNumEntries].tag_entry.count = count;
+    m_Entries[m_nNumEntries].tag_entry.copy = 1;
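+    // Array, ASCII, and undefined payloads are deep-copied into heap memory
+    // that the destructor releases; scalar values are stored inline in the union.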
+    switch (type) {
+    case EXIF_BYTE:
+        {
+            if (count > 1) {
+                uint8_t *values = (uint8_t *)malloc(count);
+                if (values == NULL) {
+                    ALOGE("%s: No memory for byte array", __func__);
+                    rc = NO_MEMORY;
+                } else {
+                    memcpy(values, data, count);
+                    m_Entries[m_nNumEntries].tag_entry.data._bytes = values;
+                }
+            } else {
+                m_Entries[m_nNumEntries].tag_entry.data._byte = *(uint8_t *)data;
+            }
+        }
+        break;
+    case EXIF_ASCII:
+        {
+            char *str = NULL;
+            str = (char *)malloc(count + 1);
+            if (str == NULL) {
+                ALOGE("%s: No memory for ascii string", __func__);
+                rc = NO_MEMORY;
+            } else {
+                memset(str, 0, count + 1);
+                memcpy(str, data, count);
+                m_Entries[m_nNumEntries].tag_entry.data._ascii = str;
+            }
+        }
+        break;
+    case EXIF_SHORT:
+        {
+            if (count > 1) {
+                uint16_t *values = (uint16_t *)malloc(count * sizeof(uint16_t));
+                if (values == NULL) {
+                    ALOGE("%s: No memory for short array", __func__);
+                    rc = NO_MEMORY;
+                } else {
+                    memcpy(values, data, count * sizeof(uint16_t));
+                    m_Entries[m_nNumEntries].tag_entry.data._shorts = values;
+                }
+            } else {
+                m_Entries[m_nNumEntries].tag_entry.data._short = *(uint16_t *)data;
+            }
+        }
+        break;
+    case EXIF_LONG:
+        {
+            if (count > 1) {
+                uint32_t *values = (uint32_t *)malloc(count * sizeof(uint32_t));
+                if (values == NULL) {
+                    ALOGE("%s: No memory for long array", __func__);
+                    rc = NO_MEMORY;
+                } else {
+                    memcpy(values, data, count * sizeof(uint32_t));
+                    m_Entries[m_nNumEntries].tag_entry.data._longs = values;
+                }
+            } else {
+                m_Entries[m_nNumEntries].tag_entry.data._long = *(uint32_t *)data;
+            }
+        }
+        break;
+    case EXIF_RATIONAL:
+        {
+            if (count > 1) {
+                rat_t *values = (rat_t *)malloc(count * sizeof(rat_t));
+                if (values == NULL) {
+                    ALOGE("%s: No memory for rational array", __func__);
+                    rc = NO_MEMORY;
+                } else {
+                    memcpy(values, data, count * sizeof(rat_t));
+                    m_Entries[m_nNumEntries].tag_entry.data._rats = values;
+                }
+            } else {
+                m_Entries[m_nNumEntries].tag_entry.data._rat = *(rat_t *)data;
+            }
+        }
+        break;
+    case EXIF_UNDEFINED:
+        {
+            uint8_t *values = (uint8_t *)malloc(count);
+            if (values == NULL) {
+                ALOGE("%s: No memory for undefined array", __func__);
+                rc = NO_MEMORY;
+            } else {
+                memcpy(values, data, count);
+                m_Entries[m_nNumEntries].tag_entry.data._undefined = values;
+            }
+        }
+        break;
+    case EXIF_SLONG:
+        {
+            if (count > 1) {
+                int32_t *values = (int32_t *)malloc(count * sizeof(int32_t));
+                if (values == NULL) {
+                    ALOGE("%s: No memory for signed long array", __func__);
+                    rc = NO_MEMORY;
+                } else {
+                    memcpy(values, data, count * sizeof(int32_t));
+                    m_Entries[m_nNumEntries].tag_entry.data._slongs = values;
+                }
+            } else {
+                m_Entries[m_nNumEntries].tag_entry.data._slong = *(int32_t *)data;
+            }
+        }
+        break;
+    case EXIF_SRATIONAL:
+        {
+            if (count > 1) {
+                srat_t *values = (srat_t *)malloc(count * sizeof(srat_t));
+                if (values == NULL) {
+                    ALOGE("%s: No memory for signed rational array", __func__);
+                    rc = NO_MEMORY;
+                } else {
+                    memcpy(values, data, count * sizeof(srat_t));
+                    m_Entries[m_nNumEntries].tag_entry.data._srats = values;
+                }
+            } else {
+                m_Entries[m_nNumEntries].tag_entry.data._srat = *(srat_t *)data;
+            }
+        }
+        break;
+    }
+
+    // Increase number of entries
+    m_nNumEntries++;
+    return rc;
+}
+
+}; // namespace qcamera
diff --git a/camera/QCamera2/HAL/QCameraPostProc.h b/camera/QCamera2/HAL/QCameraPostProc.h
new file mode 100644
index 0000000..88b0321
--- /dev/null
+++ b/camera/QCamera2/HAL/QCameraPostProc.h
@@ -0,0 +1,227 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCAMERA_POSTPROC_H__
+#define __QCAMERA_POSTPROC_H__
+
+extern "C" {
+#include <mm_camera_interface.h>
+#include <mm_jpeg_interface.h>
+}
+#include "QCamera2HWI.h"
+
+#define MAX_JPEG_BURST 2
+
+namespace qcamera {
+
+class QCameraExif;
+
+typedef struct {
+    uint32_t jobId;                  // job ID
+    uint32_t client_hdl;             // handle of jpeg client (obtained when open jpeg)
+    mm_camera_super_buf_t *src_frame;// source frame (needs to be returned
+                                     //to the kernel when done)
+    mm_camera_super_buf_t *src_reproc_frame; // original source
+                                             //frame for reproc if not NULL
+    metadata_buffer_t *metadata;     // source frame metadata
+    bool reproc_frame_release;       // false: release original buffer; true: don't release it
+    mm_camera_buf_def_t *src_reproc_bufs;
+    QCameraExif *pJpegExifObj;
+} qcamera_jpeg_data_t;
+
+
+typedef struct {
+    int8_t reprocCount;
+    mm_camera_super_buf_t *src_frame;    // source frame that needs post process
+    mm_camera_super_buf_t *src_reproc_frame;// source frame (needs to be
+                                            //returned to the kernel when done)
+} qcamera_pp_request_t;
+
+typedef struct {
+    uint32_t jobId;                  // job ID
+    int8_t reprocCount;              //Current pass count
+    mm_camera_super_buf_t *src_frame;// source frame
+    bool reproc_frame_release;       // false: release original buffer
+                                     // true: don't release it
+    mm_camera_buf_def_t *src_reproc_bufs;
+    mm_camera_super_buf_t *src_reproc_frame;// source frame (needs to be
+                                            //returned to the kernel when done)
+} qcamera_pp_data_t;
+
+typedef struct {
+    uint32_t jobId;                  // job ID (obtained when start_jpeg_job)
+    jpeg_job_status_t status;        // jpeg encoding status
+    mm_jpeg_output_t out_data;         // jpeg output buffer
+} qcamera_jpeg_evt_payload_t;
+
+typedef struct {
+    camera_memory_t *        data;     // ptr to data memory struct
+    mm_camera_super_buf_t *  frame;    // ptr to frame
+    QCameraMemory *          streamBufs; //ptr to stream buffers
+    bool                     unlinkFile; // unlink any stored buffers on error
+} qcamera_release_data_t;
+
+typedef struct {
+    int32_t                  msg_type; // msg type of data notify
+    camera_memory_t *        data;     // ptr to data memory struct
+    unsigned int             index;    // index of the buf in the whole buffer
+    camera_frame_metadata_t *metadata; // ptr to meta data
+    qcamera_release_data_t   release_data; // any data that needs to be released after notify
+} qcamera_data_argm_t;
+
+#define MAX_EXIF_TABLE_ENTRIES 17
+class QCameraExif
+{
+public:
+    QCameraExif();
+    virtual ~QCameraExif();
+
+    int32_t addEntry(exif_tag_id_t tagid,
+                     exif_tag_type_t type,
+                     uint32_t count,
+                     void *data);
+    uint32_t getNumOfEntries() {return m_nNumEntries;};
+    QEXIF_INFO_DATA *getEntries() {return m_Entries;};
+
+private:
+    QEXIF_INFO_DATA m_Entries[MAX_EXIF_TABLE_ENTRIES];  // exif tags for JPEG encoder
+    uint32_t  m_nNumEntries;                            // number of valid entries
+};
+
+class QCameraPostProcessor
+{
+public:
+    QCameraPostProcessor(QCamera2HardwareInterface *cam_ctrl);
+    virtual ~QCameraPostProcessor();
+
+    int32_t init(jpeg_encode_callback_t jpeg_cb, void *user_data);
+    int32_t deinit();
+    int32_t start(QCameraChannel *pSrcChannel);
+    int32_t stop();
+    bool validatePostProcess(mm_camera_super_buf_t *frame);
+    int32_t processData(mm_camera_super_buf_t *frame);
+    int32_t processRawData(mm_camera_super_buf_t *frame);
+    int32_t processPPData(mm_camera_super_buf_t *frame);
+    int32_t processJpegEvt(qcamera_jpeg_evt_payload_t *evt);
+    int32_t getJpegPaddingReq(cam_padding_info_t &padding_info);
+    QCameraReprocessChannel * getReprocChannel(uint8_t index);
+    inline bool getJpegMemOpt() {return mJpegMemOpt;}
+    inline void setJpegMemOpt(bool val) {mJpegMemOpt = val;}
+private:
+    int32_t sendDataNotify(int32_t msg_type,
+                           camera_memory_t *data,
+                           uint8_t index,
+                           camera_frame_metadata_t *metadata,
+                           qcamera_release_data_t *release_data);
+    int32_t sendEvtNotify(int32_t msg_type, int32_t ext1, int32_t ext2);
+    qcamera_jpeg_data_t *findJpegJobByJobId(uint32_t jobId);
+    mm_jpeg_color_format getColorfmtFromImgFmt(cam_format_t img_fmt);
+    mm_jpeg_format_t getJpegImgTypeFromImgFmt(cam_format_t img_fmt);
+    int32_t getJpegEncodingConfig(mm_jpeg_encode_params_t& encode_parm,
+                                  QCameraStream *main_stream,
+                                  QCameraStream *thumb_stream);
+    int32_t encodeData(qcamera_jpeg_data_t *jpeg_job_data,
+                       uint8_t &needNewSess);
+    int32_t queryStreams(QCameraStream **main,
+            QCameraStream **thumb,
+            QCameraStream **reproc,
+            mm_camera_buf_def_t **main_image,
+            mm_camera_buf_def_t **thumb_image,
+            mm_camera_super_buf_t *main_frame,
+            mm_camera_super_buf_t *reproc_frame);
+    int32_t syncStreamParams(mm_camera_super_buf_t *frame,
+            mm_camera_super_buf_t *reproc_frame);
+    void releaseSuperBuf(mm_camera_super_buf_t *super_buf);
+    static void releaseNotifyData(void *user_data,
+                                  void *cookie,
+                                  int32_t cb_status);
+    void releaseJpegJobData(qcamera_jpeg_data_t *job);
+    static void releaseSaveJobData(void *data, void *user_data);
+    static void releaseRawData(void *data, void *user_data);
+    int32_t processRawImageImpl(mm_camera_super_buf_t *recvd_frame);
+
+    static void releaseJpegData(void *data, void *user_data);
+    static void releasePPInputData(void *data, void *user_data);
+    static void releaseOngoingPPData(void *data, void *user_data);
+
+    static void *dataProcessRoutine(void *data);
+    static void *dataSaveRoutine(void *data);
+
+    int32_t setYUVFrameInfo(mm_camera_super_buf_t *recvd_frame);
+    static bool matchJobId(void *data, void *user_data, void *match_data);
+    static int getJpegMemory(omx_jpeg_ouput_buf_t *out_buf);
+
+    int32_t doReprocess();
+    int32_t stopCapture();
+
+private:
+    QCamera2HardwareInterface *m_parent;
+    jpeg_encode_callback_t     mJpegCB;
+    void *                     mJpegUserData;
+    mm_jpeg_ops_t              mJpegHandle;
+    uint32_t                   mJpegClientHandle;
+    uint32_t                   mJpegSessionId;
+
+    void *                     m_pJpegOutputMem[MM_JPEG_MAX_BUF];
+    QCameraExif *              m_pJpegExifObj;
+    uint32_t                   m_bThumbnailNeeded;
+
+    int8_t                     mTotalNumReproc;
+    QCameraReprocessChannel    *mPPChannels[CAM_QCOM_FEATURE_MAX];
+
+    camera_memory_t *          m_DataMem; // save frame mem pointer
+
+    int8_t                     m_bInited; // if postproc is inited
+
+    QCameraQueue m_inputPPQ;            // input queue for postproc
+    QCameraQueue m_ongoingPPQ;          // ongoing postproc queue
+    QCameraQueue m_inputJpegQ;          // input jpeg job queue
+    QCameraQueue m_ongoingJpegQ;        // ongoing jpeg job queue
+    QCameraQueue m_inputRawQ;           // input raw job queue
+    QCameraQueue m_inputSaveQ;          // input save job queue
+    QCameraCmdThread m_dataProcTh;      // thread for data processing
+    QCameraCmdThread m_saveProcTh;      // thread for storing buffers
+    uint32_t mSaveFrmCnt;               // save frame counter
+    static const char *STORE_LOCATION;  // path for storing buffers
+    bool mUseSaveProc;                  // use store thread
+    bool mUseJpegBurst;                 // use jpeg burst encoding mode
+    bool mJpegMemOpt;
+    uint32_t   m_JpegOutputMemCount;
+    uint8_t mNewJpegSessionNeeded;
+    int32_t m_bufCountPPQ;
+    Vector<mm_camera_buf_def_t *> m_InputMetadata; // store input metadata buffers for AOST cases
+    size_t m_PPindex;                   // counter for each incoming AOST buffer
+
+public:
+    cam_dimension_t m_dst_dim;
+};
+
+}; // namespace qcamera
+
+#endif /* __QCAMERA_POSTPROC_H__ */
diff --git a/camera/QCamera2/HAL/QCameraStateMachine.cpp b/camera/QCamera2/HAL/QCameraStateMachine.cpp
new file mode 100644
index 0000000..84c88d3
--- /dev/null
+++ b/camera/QCamera2/HAL/QCameraStateMachine.cpp
@@ -0,0 +1,3397 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#define LOG_TAG "QCameraStateMachine"
+
+#include <utils/Errors.h>
+#include "QCamera2HWI.h"
+#include "QCameraStateMachine.h"
+
+namespace qcamera {
+
+/*===========================================================================
+ * FUNCTION   : smEvtProcRoutine
+ *
+ * DESCRIPTION: State machine processing thread routine; handles events
+ *              according to the current state.
+ *
+ * PARAMETERS :
+ *   @data    : ptr to QCameraStateMachine object
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void *QCameraStateMachine::smEvtProcRoutine(void *data)
+{
+    int running = 1, ret;
+    QCameraStateMachine *pme = (QCameraStateMachine *)data;
+
+    CDBG_HIGH("%s: E", __func__);
+    do {
+        do {
+            ret = cam_sem_wait(&pme->cmd_sem);
+            if (ret != 0 && errno != EINVAL) {
+                ALOGE("%s: cam_sem_wait error (%s)",
+                           __func__, strerror(errno));
+                return NULL;
+            }
+        } while (ret != 0);
+
+        // we got notified about new cmd avail in cmd queue
+        // first check API cmd queue
+        qcamera_sm_cmd_t *node = (qcamera_sm_cmd_t *)pme->api_queue.dequeue();
+        if (node == NULL) {
+            // no API cmd, then check evt cmd queue
+            node = (qcamera_sm_cmd_t *)pme->evt_queue.dequeue();
+        }
+        if (node != NULL) {
+            switch (node->cmd) {
+            case QCAMERA_SM_CMD_TYPE_API:
+                pme->stateMachine(node->evt, node->evt_payload);
+                // API calls are effectively synchronous, so evt_payload is
+                // managed by HWI; no need to free the payload here
+                break;
+            case QCAMERA_SM_CMD_TYPE_EVT:
+                pme->stateMachine(node->evt, node->evt_payload);
+
+                // EVT is an async call, so the payload needs to be freed after use
+                free(node->evt_payload);
+                node->evt_payload = NULL;
+                break;
+            case QCAMERA_SM_CMD_TYPE_EXIT:
+                running = 0;
+                break;
+            default:
+                break;
+            }
+            free(node);
+            node = NULL;
+        }
+    } while (running);
+    CDBG_HIGH("%s: X", __func__);
+    return NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : QCameraStateMachine
+ *
+ * DESCRIPTION: constructor of QCameraStateMachine. Will start process thread
+ *
+ * PARAMETERS :
+ *   @ctrl    : ptr to HWI object
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraStateMachine::QCameraStateMachine(QCamera2HardwareInterface *ctrl) :
+    api_queue(),
+    evt_queue()
+{
+    m_parent = ctrl;
+    m_state = QCAMERA_SM_STATE_PREVIEW_STOPPED;
+    cmd_pid = 0;
+    cam_sem_init(&cmd_sem, 0);
+    pthread_create(&cmd_pid,
+                   NULL,
+                   smEvtProcRoutine,
+                   this);
+    pthread_setname_np(cmd_pid, "CAM_stMachine");
+    m_bDelayPreviewMsgs = false;
+    m_DelayedMsgs = 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCameraStateMachine
+ *
+ * DESCRIPTION: destructor of QCameraStateMachine. Destroys the command
+ *              semaphore; the processing thread is stopped via releaseThread().
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCameraStateMachine::~QCameraStateMachine()
+{
+    cam_sem_destroy(&cmd_sem);
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseThread
+ *
+ * DESCRIPTION: Sends an exit command and terminates the state machine thread
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraStateMachine::releaseThread()
+{
+    if (cmd_pid != 0) {
+        qcamera_sm_cmd_t *node =
+            (qcamera_sm_cmd_t *)malloc(sizeof(qcamera_sm_cmd_t));
+        if (NULL != node) {
+            memset(node, 0, sizeof(qcamera_sm_cmd_t));
+            node->cmd = QCAMERA_SM_CMD_TYPE_EXIT;
+
+            if (api_queue.enqueue((void *)node)) {
+                cam_sem_post(&cmd_sem);
+            } else {
+                free(node);
+                node = NULL;
+            }
+
+            /* wait until cmd thread exits */
+            if (pthread_join(cmd_pid, NULL) != 0) {
+                CDBG_HIGH("%s: pthread dead already\n", __func__);
+            }
+        }
+        cmd_pid = 0;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : applyDelayedMsgs
+ *
+ * DESCRIPTION: Enable any delayed message types, if needed
+ *
+ * PARAMETERS : None
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStateMachine::applyDelayedMsgs()
+{
+    int32_t rc = NO_ERROR;
+
+    if (m_bDelayPreviewMsgs && m_DelayedMsgs) {
+        rc = m_parent->enableMsgType(m_DelayedMsgs);
+        m_bDelayPreviewMsgs = false;
+        m_DelayedMsgs = 0;
+    } else if (m_bDelayPreviewMsgs) {
+        m_bDelayPreviewMsgs = false;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : procAPI
+ *
+ * DESCRIPTION: process incoming API request from framework layer.
+ *
+ * PARAMETERS :
+ *   @evt          : event to be processed
+ *   @api_payload  : API payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStateMachine::procAPI(qcamera_sm_evt_enum_t evt,
+                                     void *api_payload)
+{
+    qcamera_sm_cmd_t *node =
+        (qcamera_sm_cmd_t *)malloc(sizeof(qcamera_sm_cmd_t));
+    if (NULL == node) {
+        ALOGE("%s: No memory for qcamera_sm_cmd_t", __func__);
+        return NO_MEMORY;
+    }
+
+    memset(node, 0, sizeof(qcamera_sm_cmd_t));
+    node->cmd = QCAMERA_SM_CMD_TYPE_API;
+    node->evt = evt;
+    node->evt_payload = api_payload;
+    if (api_queue.enqueue((void *)node)) {
+        cam_sem_post(&cmd_sem);
+        return NO_ERROR;
+    } else {
+        free(node);
+        return UNKNOWN_ERROR;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : procEvt
+ *
+ * DESCRIPTION: process incoming event from mm-camera-interface and
+ *              mm-jpeg-interface.
+ *
+ * PARAMETERS :
+ *   @evt          : event to be processed
+ *   @evt_payload  : event payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStateMachine::procEvt(qcamera_sm_evt_enum_t evt,
+                                     void *evt_payload)
+{
+    qcamera_sm_cmd_t *node =
+        (qcamera_sm_cmd_t *)malloc(sizeof(qcamera_sm_cmd_t));
+    if (NULL == node) {
+        ALOGE("%s: No memory for qcamera_sm_cmd_t", __func__);
+        return NO_MEMORY;
+    }
+
+    memset(node, 0, sizeof(qcamera_sm_cmd_t));
+    node->cmd = QCAMERA_SM_CMD_TYPE_EVT;
+    node->evt = evt;
+    node->evt_payload = evt_payload;
+    if (evt_queue.enqueue((void *)node)) {
+        cam_sem_post(&cmd_sem);
+        return NO_ERROR;
+    } else {
+        free(node);
+        return UNKNOWN_ERROR;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : stateMachine
+ *
+ * DESCRIPTION: finite state machine entry function. Depending on the current
+ *              state, the incoming event will be handled differently.
+ *
+ * PARAMETERS :
+ *   @evt      : event to be processed
+ *   @payload  : event payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStateMachine::stateMachine(qcamera_sm_evt_enum_t evt, void *payload)
+{
+    int32_t rc = NO_ERROR;
+    ALOGV("%s: m_state %d, event (%d)", __func__, m_state, evt);
+    switch (m_state) {
+    case QCAMERA_SM_STATE_PREVIEW_STOPPED:
+        rc = procEvtPreviewStoppedState(evt, payload);
+        break;
+    case QCAMERA_SM_STATE_PREVIEW_READY:
+        rc = procEvtPreviewReadyState(evt, payload);
+        break;
+    case QCAMERA_SM_STATE_PREVIEWING:
+        rc = procEvtPreviewingState(evt, payload);
+        break;
+    case QCAMERA_SM_STATE_PREPARE_SNAPSHOT:
+        rc = procEvtPrepareSnapshotState(evt, payload);
+        break;
+    case QCAMERA_SM_STATE_PIC_TAKING:
+        rc = procEvtPicTakingState(evt, payload);
+        break;
+    case QCAMERA_SM_STATE_RECORDING:
+        rc = procEvtRecordingState(evt, payload);
+        break;
+    case QCAMERA_SM_STATE_VIDEO_PIC_TAKING:
+        rc = procEvtVideoPicTakingState(evt, payload);
+        break;
+    case QCAMERA_SM_STATE_PREVIEW_PIC_TAKING:
+        rc = procEvtPreviewPicTakingState(evt, payload);
+        break;
+    default:
+        break;
+    }
+
+    return rc;
+}
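+
+/*
+ * NOTE: stateMachine() is a plain dispatcher: one handler per
+ * QCAMERA_SM_STATE_* value, each returning a status code that is propagated
+ * back to the caller. An event arriving in an unrecognized state falls
+ * through the default branch and is silently ignored (rc stays NO_ERROR).
+ */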
+
+/*===========================================================================
+ * FUNCTION   : procEvtPreviewStoppedState
+ *
+ * DESCRIPTION: finite state machine function to handle event in state of
+ *              QCAMERA_SM_STATE_PREVIEW_STOPPED.
+ *
+ * PARAMETERS :
+ *   @evt      : event to be processed
+ *   @payload  : event payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStateMachine::procEvtPreviewStoppedState(qcamera_sm_evt_enum_t evt,
+                                                        void *payload)
+{
+    int32_t rc = NO_ERROR;
+    qcamera_api_result_t result;
+    memset(&result, 0, sizeof(qcamera_api_result_t));
+
+    ALOGV("%s: event (%d)", __func__, evt);
+    switch (evt) {
+    case QCAMERA_SM_EVT_SET_PREVIEW_WINDOW:
+        {
+            rc = m_parent->setPreviewWindow((struct preview_stream_ops *)payload);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_SET_CALLBACKS:
+        {
+            qcamera_sm_evt_setcb_payload_t *setcbs =
+                (qcamera_sm_evt_setcb_payload_t *)payload;
+            rc = m_parent->setCallBacks(setcbs->notify_cb,
+                                        setcbs->data_cb,
+                                        setcbs->data_cb_timestamp,
+                                        setcbs->get_memory,
+                                        setcbs->user);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_ENABLE_MSG_TYPE:
+        {
+            rc = m_parent->enableMsgType(*((int32_t *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_DISABLE_MSG_TYPE:
+        {
+            rc = m_parent->disableMsgType(*((int32_t *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_MSG_TYPE_ENABLED:
+        {
+            int enabled = m_parent->msgTypeEnabled(*((int32_t *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_ENABLE_FLAG;
+            result.enabled = enabled;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_SET_PARAMS:
+        {
+            bool needRestart = false;
+            rc = m_parent->updateParameters((char*)payload, needRestart);
+            if (needRestart) {
+                // Clear memory pools
+                m_parent->m_memoryPool.clear();
+            }
+            if (rc == NO_ERROR) {
+                rc = m_parent->commitParameterChanges();
+            }
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_GET_PARAMS:
+        {
+            result.params = m_parent->getParameters();
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_PARAMS;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_PUT_PARAMS:
+        {
+            rc = m_parent->putParameters((char*)payload);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_START_PREVIEW:
+        {
+            if (m_parent->mPreviewWindow == NULL) {
+                rc = m_parent->preparePreview();
+                if(rc == NO_ERROR) {
+                    // preview window is not set yet, move to previewReady state
+                    m_state = QCAMERA_SM_STATE_PREVIEW_READY;
+                } else {
+                    ALOGE("%s: preparePreview failed",__func__);
+                }
+            } else {
+                rc = m_parent->preparePreview();
+                if (rc == NO_ERROR) {
+                    applyDelayedMsgs();
+                    rc = m_parent->startPreview();
+                    if (rc != NO_ERROR) {
+                        m_parent->unpreparePreview();
+                    } else {
+                        // start preview success, move to previewing state
+                        m_state = QCAMERA_SM_STATE_PREVIEWING;
+                    }
+                }
+            }
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_START_NODISPLAY_PREVIEW:
+        {
+            rc = m_parent->preparePreview();
+            if (rc == NO_ERROR) {
+                applyDelayedMsgs();
+                rc = m_parent->startPreview();
+                if (rc != NO_ERROR) {
+                    m_parent->unpreparePreview();
+                } else {
+                    m_state = QCAMERA_SM_STATE_PREVIEWING;
+                }
+            }
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STOP_PREVIEW:
+        {
+            // no op needed here
+            CDBG_HIGH("%s: already in preview stopped state, do nothing", __func__);
+            result.status = NO_ERROR;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_PREVIEW_ENABLED:
+    case QCAMERA_SM_EVT_RECORDING_ENABLED:
+        {
+            result.status = NO_ERROR;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_ENABLE_FLAG;
+            result.enabled = 0;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_RELEASE:
+        {
+            rc = m_parent->release();
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STORE_METADATA_IN_BUFS:
+        {
+            rc = m_parent->storeMetaDataInBuffers(*((int *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_DUMP:
+        {
+            rc = m_parent->dump(*((int *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_SEND_COMMAND:
+        {
+            qcamera_sm_evt_command_payload_t *cmd_payload =
+                (qcamera_sm_evt_command_payload_t *)payload;
+            rc = m_parent->sendCommand(cmd_payload->cmd,
+                                       cmd_payload->arg1,
+                                       cmd_payload->arg2);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_START_RECORDING:
+    case QCAMERA_SM_EVT_STOP_RECORDING:
+    case QCAMERA_SM_EVT_RELEASE_RECORIDNG_FRAME:
+    case QCAMERA_SM_EVT_PREPARE_SNAPSHOT:
+    case QCAMERA_SM_EVT_TAKE_PICTURE:
+        {
+            ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+            rc = INVALID_OPERATION;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_START_AUTO_FOCUS:
+    case QCAMERA_SM_EVT_CANCEL_PICTURE:
+        {
+            // no op needed here
+            CDBG_HIGH("%s: No ops for evt(%d) in state(%d)", __func__, evt, m_state);
+            result.status = NO_ERROR;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STOP_AUTO_FOCUS:
+        {
+            rc = m_parent->cancelAutoFocus();
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_REG_FACE_IMAGE:
+        {
+            int32_t faceID = 0;
+            qcamera_sm_evt_reg_face_payload_t *reg_payload =
+                (qcamera_sm_evt_reg_face_payload_t *)payload;
+            rc = m_parent->registerFaceImage(reg_payload->img_ptr,
+                                             reg_payload->config,
+                                             faceID);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_HANDLE;
+            result.handle = faceID;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_THERMAL_NOTIFY:
+        {
+            rc = m_parent->updateThermalLevel(payload);
+        }
+        break;
+    case QCAMERA_SM_EVT_EVT_NOTIFY:
+        {
+            mm_camera_event_t *cam_evt = (mm_camera_event_t *)payload;
+            switch (cam_evt->server_event_type) {
+            case CAM_EVENT_TYPE_DAEMON_DIED:
+                {
+                    //close the camera backend
+                    mm_camera_vtbl_t* handle = m_parent->mCameraHandle;
+                    if (handle && handle->ops) {
+                        handle->ops->error_close_camera(handle->camera_handle);
+                    } else {
+                        ALOGE("%s: Could not close because the handle or ops is NULL",
+                                __func__);
+                    }
+                    m_parent->sendEvtNotify(CAMERA_MSG_ERROR,
+                                            CAMERA_ERROR_SERVER_DIED,
+                                            0);
+                }
+                break;
+            default:
+                ALOGE("%s: Invalid internal event %d in state(%d)",
+                            __func__, cam_evt->server_event_type, m_state);
+                break;
+            }
+        }
+        break;
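+    /*
+     * NOTE: the CAM_EVENT_TYPE_DAEMON_DIED path above is the recovery hook
+     * for a dead mm-camera daemon: the backend session is force-closed via
+     * error_close_camera() and the framework is notified with
+     * CAMERA_MSG_ERROR/CAMERA_ERROR_SERVER_DIED, which is expected to make
+     * the client release and reopen the camera. The same block is repeated
+     * in the other per-state handlers below.
+     */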
+    case QCAMERA_SM_EVT_SNAPSHOT_DONE:
+        {
+            // No ops, but need to notify
+            ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalEvtResult(&result);
+        }
+       break;
+    case QCAMERA_SM_EVT_EVT_INTERNAL:
+       {
+           qcamera_sm_internal_evt_payload_t *internal_evt =
+               (qcamera_sm_internal_evt_payload_t *)payload;
+           switch (internal_evt->evt_type) {
+           case QCAMERA_INTERNAL_EVT_LED_MODE_OVERRIDE:
+               rc = m_parent->mParameters.updateFlashMode(internal_evt->led_data);
+               break;
+           default:
+               ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+               break;
+           }
+       }
+       break;
+    case QCAMERA_SM_EVT_JPEG_EVT_NOTIFY:
+    default:
+        ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+        break;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : procEvtPreviewReadyState
+ *
+ * DESCRIPTION: finite state machine function to handle event in state of
+ *              QCAMERA_SM_STATE_PREVIEW_READY.
+ *
+ * PARAMETERS :
+ *   @evt      : event to be processed
+ *   @payload  : event payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStateMachine::procEvtPreviewReadyState(qcamera_sm_evt_enum_t evt,
+                                                      void *payload)
+{
+    int32_t rc = NO_ERROR;
+    qcamera_api_result_t result;
+    memset(&result, 0, sizeof(qcamera_api_result_t));
+
+    ALOGV("%s: event (%d)", __func__, evt);
+    switch (evt) {
+    case QCAMERA_SM_EVT_SET_PREVIEW_WINDOW:
+        {
+            m_parent->setPreviewWindow((struct preview_stream_ops *)payload);
+            if (m_parent->mPreviewWindow != NULL) {
+                applyDelayedMsgs();
+                rc = m_parent->startPreview();
+                if (rc != NO_ERROR) {
+                    m_parent->unpreparePreview();
+                    m_state = QCAMERA_SM_STATE_PREVIEW_STOPPED;
+                } else {
+                    m_state = QCAMERA_SM_STATE_PREVIEWING;
+                }
+            }
+
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_SET_CALLBACKS:
+        {
+            qcamera_sm_evt_setcb_payload_t *setcbs =
+                (qcamera_sm_evt_setcb_payload_t *)payload;
+            rc = m_parent->setCallBacks(setcbs->notify_cb,
+                                        setcbs->data_cb,
+                                        setcbs->data_cb_timestamp,
+                                        setcbs->get_memory,
+                                        setcbs->user);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_ENABLE_MSG_TYPE:
+        {
+            rc = m_parent->enableMsgType(*((int32_t *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_DISABLE_MSG_TYPE:
+        {
+            rc = m_parent->disableMsgType(*((int32_t *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_MSG_TYPE_ENABLED:
+        {
+            int enabled = m_parent->msgTypeEnabled(*((int32_t *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_ENABLE_FLAG;
+            result.enabled = enabled;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_SET_PARAMS:
+        {
+            bool needRestart = false;
+            rc = m_parent->updateParameters((char*)payload, needRestart);
+            if (rc == NO_ERROR) {
+                if (needRestart) {
+                    // need restart preview for parameters to take effect
+                    m_parent->unpreparePreview();
+                    // Clear memory pools
+                    m_parent->m_memoryPool.clear();
+                    // commit parameter changes to server
+                    m_parent->commitParameterChanges();
+                    // prepare preview again
+                    rc = m_parent->preparePreview();
+                    if (rc != NO_ERROR) {
+                        m_state = QCAMERA_SM_STATE_PREVIEW_STOPPED;
+                    }
+                } else {
+                    rc = m_parent->commitParameterChanges();
+                }
+            }
+
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_GET_PARAMS:
+        {
+            result.params = m_parent->getParameters();
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_PARAMS;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_PUT_PARAMS:
+        {
+            rc = m_parent->putParameters((char*)payload);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_START_PREVIEW:
+        {
+            // no ops here
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STOP_PREVIEW:
+        {
+            m_parent->unpreparePreview();
+            rc = 0;
+            m_state = QCAMERA_SM_STATE_PREVIEW_STOPPED;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_PREVIEW_ENABLED:
+        {
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_ENABLE_FLAG;
+            result.enabled = 1;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_RECORDING_ENABLED:
+        {
+            rc = 0;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_ENABLE_FLAG;
+            result.enabled = 0;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STORE_METADATA_IN_BUFS:
+        {
+            rc = m_parent->storeMetaDataInBuffers(*((int *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_DUMP:
+        {
+            rc = m_parent->dump(*((int *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_START_AUTO_FOCUS:
+        {
+            rc = m_parent->autoFocus();
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STOP_AUTO_FOCUS:
+        {
+            rc = m_parent->cancelAutoFocus();
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_SEND_COMMAND:
+        {
+            qcamera_sm_evt_command_payload_t *cmd_payload =
+                (qcamera_sm_evt_command_payload_t *)payload;
+            rc = m_parent->sendCommand(cmd_payload->cmd,
+                                       cmd_payload->arg1,
+                                       cmd_payload->arg2);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_REG_FACE_IMAGE:
+        {
+            int32_t faceID = 0;
+            qcamera_sm_evt_reg_face_payload_t *reg_payload =
+                (qcamera_sm_evt_reg_face_payload_t *)payload;
+            rc = m_parent->registerFaceImage(reg_payload->img_ptr,
+                                             reg_payload->config,
+                                             faceID);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_HANDLE;
+            result.handle = faceID;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_START_NODISPLAY_PREVIEW:
+    case QCAMERA_SM_EVT_START_RECORDING:
+    case QCAMERA_SM_EVT_STOP_RECORDING:
+    case QCAMERA_SM_EVT_PREPARE_SNAPSHOT:
+    case QCAMERA_SM_EVT_TAKE_PICTURE:
+    case QCAMERA_SM_EVT_CANCEL_PICTURE:
+    case QCAMERA_SM_EVT_RELEASE_RECORIDNG_FRAME:
+    case QCAMERA_SM_EVT_RELEASE:
+        {
+            ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+            rc = INVALID_OPERATION;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_EVT_NOTIFY:
+        {
+            mm_camera_event_t *cam_evt = (mm_camera_event_t *)payload;
+            switch (cam_evt->server_event_type) {
+            case CAM_EVENT_TYPE_DAEMON_DIED:
+                {
+                    //close the camera backend
+                    mm_camera_vtbl_t* handle = m_parent->mCameraHandle;
+                    if (handle && handle->ops) {
+                        handle->ops->error_close_camera(handle->camera_handle);
+                    } else {
+                        ALOGE("%s: Could not close because the handle or ops is NULL",
+                                __func__);
+                    }
+                    m_parent->sendEvtNotify(CAMERA_MSG_ERROR,
+                                            CAMERA_ERROR_SERVER_DIED,
+                                            0);
+                }
+                break;
+            default:
+                ALOGE("%s: Invalid internal event %d in state(%d)",
+                            __func__, cam_evt->server_event_type, m_state);
+                break;
+            }
+        }
+        break;
+    case QCAMERA_SM_EVT_SNAPSHOT_DONE:
+        {
+            // No ops, but need to notify
+            ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalEvtResult(&result);
+        }
+       break;
+    case QCAMERA_SM_EVT_EVT_INTERNAL:
+       {
+           qcamera_sm_internal_evt_payload_t *internal_evt =
+                   (qcamera_sm_internal_evt_payload_t *)payload;
+           switch (internal_evt->evt_type) {
+           case QCAMERA_INTERNAL_EVT_LED_MODE_OVERRIDE:
+               rc = m_parent->mParameters.updateFlashMode(internal_evt->led_data);
+               break;
+           default:
+               ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+               break;
+           }
+       }
+       break;
+    case QCAMERA_SM_EVT_JPEG_EVT_NOTIFY:
+    case QCAMERA_SM_EVT_THERMAL_NOTIFY:
+    default:
+        ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+        break;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : procEvtPreviewingState
+ *
+ * DESCRIPTION: finite state machine function to handle event in state of
+ *              QCAMERA_SM_STATE_PREVIEWING.
+ *
+ * PARAMETERS :
+ *   @evt      : event to be processed
+ *   @payload  : event payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStateMachine::procEvtPreviewingState(qcamera_sm_evt_enum_t evt,
+                                                    void *payload)
+{
+    int32_t rc = NO_ERROR;
+    qcamera_api_result_t result;
+    memset(&result, 0, sizeof(qcamera_api_result_t));
+
+    ALOGV("%s: event (%d)", __func__, evt);
+    switch (evt) {
+    case QCAMERA_SM_EVT_SET_PREVIEW_WINDOW:
+        {
+            // Error setting preview window during previewing
+            ALOGE("Error!! cannot set preview window when preview is running");
+            rc = INVALID_OPERATION;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_SET_CALLBACKS:
+        {
+            qcamera_sm_evt_setcb_payload_t *setcbs =
+                (qcamera_sm_evt_setcb_payload_t *)payload;
+            rc = m_parent->setCallBacks(setcbs->notify_cb,
+                                        setcbs->data_cb,
+                                        setcbs->data_cb_timestamp,
+                                        setcbs->get_memory,
+                                        setcbs->user);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_ENABLE_MSG_TYPE:
+        {
+            int32_t enable_msgs = *((int32_t *)payload);
+            if (m_bDelayPreviewMsgs &&
+                    (enable_msgs & CAMERA_MSG_PREVIEW_FRAME)) {
+                enable_msgs &= ~CAMERA_MSG_PREVIEW_FRAME;
+                m_DelayedMsgs = CAMERA_MSG_PREVIEW_FRAME;
+            }
+            rc = m_parent->enableMsgType(enable_msgs);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_DISABLE_MSG_TYPE:
+        {
+            int32_t disable_msgs = *((int32_t *)payload);
+            if (m_bDelayPreviewMsgs && m_DelayedMsgs) {
+                m_DelayedMsgs &= ~disable_msgs;
+                if (0 == m_DelayedMsgs) {
+                    m_bDelayPreviewMsgs = false;
+                }
+            }
+            rc = m_parent->disableMsgType(disable_msgs);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_MSG_TYPE_ENABLED:
+        {
+            int32_t msgs = *((int32_t *)payload);
+            int enabled = m_parent->msgTypeEnabled(msgs);
+            if (m_bDelayPreviewMsgs && m_DelayedMsgs) {
+                enabled |= (msgs & m_DelayedMsgs);
+            }
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_ENABLE_FLAG;
+            result.enabled = enabled;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_SET_PARAMS:
+        {
+            bool needRestart = false;
+            rc = m_parent->updateParameters((char*)payload, needRestart);
+            if (rc == NO_ERROR) {
+                if (needRestart) {
+                    // need restart preview for parameters to take effect
+                    // stop preview
+                    m_parent->stopPreview();
+                    // Clear memory pools
+                    m_parent->m_memoryPool.clear();
+                    // commit parameter changes to server
+                    m_parent->commitParameterChanges();
+                    // start preview again
+                    rc = m_parent->preparePreview();
+                    if (rc == NO_ERROR) {
+                        applyDelayedMsgs();
+                        rc = m_parent->startPreview();
+                        if (rc != NO_ERROR) {
+                            m_parent->unpreparePreview();
+                        }
+                    }
+                    if (rc != NO_ERROR) {
+                        m_state = QCAMERA_SM_STATE_PREVIEW_STOPPED;
+                    }
+                } else {
+                    rc = m_parent->commitParameterChanges();
+                }
+            }
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
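+    /*
+     * NOTE: when updateParameters() reports needRestart, the preview
+     * pipeline is fully cycled (stopPreview -> clear memory pool -> commit
+     * -> preparePreview -> startPreview); a failure anywhere along that path
+     * drops the state machine back to QCAMERA_SM_STATE_PREVIEW_STOPPED.
+     */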
+    case QCAMERA_SM_EVT_GET_PARAMS:
+        {
+            result.params = m_parent->getParameters();
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_PARAMS;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_PUT_PARAMS:
+        {
+            rc = m_parent->putParameters((char*)payload);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_START_PREVIEW:
+    case QCAMERA_SM_EVT_START_NODISPLAY_PREVIEW:
+        {
+            // no ops here
+            CDBG_HIGH("%s: Already in previewing, no ops here to start preview", __func__);
+            applyDelayedMsgs();
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STOP_PREVIEW:
+        {
+            rc = m_parent->stopPreview();
+            applyDelayedMsgs();
+            m_state = QCAMERA_SM_STATE_PREVIEW_STOPPED;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_PREVIEW_ENABLED:
+        {
+            applyDelayedMsgs();
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_ENABLE_FLAG;
+            result.enabled = 1;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_RECORDING_ENABLED:
+        {
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_ENABLE_FLAG;
+            result.enabled = 0;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STORE_METADATA_IN_BUFS:
+        {
+            rc = m_parent->storeMetaDataInBuffers(*((int *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_DUMP:
+        {
+            rc = m_parent->dump(*((int *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_START_AUTO_FOCUS:
+        {
+            rc = m_parent->autoFocus();
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STOP_AUTO_FOCUS:
+        {
+            rc = m_parent->cancelAutoFocus();
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_START_RECORDING:
+        {
+            rc = m_parent->startRecording();
+            if (rc == NO_ERROR) {
+                // move state to recording state
+                m_state = QCAMERA_SM_STATE_RECORDING;
+                applyDelayedMsgs();
+            }
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_PREPARE_SNAPSHOT:
+        {
+            rc = m_parent->prepareHardwareForSnapshot(FALSE);
+            if (rc == NO_ERROR) {
+                // Do not signal API result in this case.
+                // Need to wait for snapshot done in metadata.
+                m_state = QCAMERA_SM_STATE_PREPARE_SNAPSHOT;
+                applyDelayedMsgs();
+            } else {
+                // Do not change state in this case.
+                ALOGE("%s: prepareHardwareForSnapshot failed %d",
+                    __func__, rc);
+
+                result.status = rc;
+                result.request_api = evt;
+                result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+                m_parent->signalAPIResult(&result);
+            }
+        }
+        break;
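+    /*
+     * NOTE: on success the API result for PREPARE_SNAPSHOT is deliberately
+     * not signalled here; the state moves to QCAMERA_SM_STATE_PREPARE_SNAPSHOT
+     * and the result is signalled later, when
+     * QCAMERA_INTERNAL_EVT_PREP_SNAPSHOT_DONE arrives (see
+     * procEvtPrepareSnapshotState()).
+     */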
+    case QCAMERA_SM_EVT_TAKE_PICTURE:
+       {
+
+           ALOGV("%s: QCAMERA_SM_EVT_TAKE_PICTURE ", __func__);
+           if ( m_parent->mParameters.getRecordingHintValue() == false) {
+               if (m_parent->isZSLMode() || m_parent->isLongshotEnabled()) {
+                   m_state = QCAMERA_SM_STATE_PREVIEW_PIC_TAKING;
+                   m_bDelayPreviewMsgs = true;
+                   rc = m_parent->takePicture();
+                   if (rc != NO_ERROR) {
+                       // move state to previewing state
+                       m_state = QCAMERA_SM_STATE_PREVIEWING;
+                   }
+                   if (!(m_parent->isRetroPicture()) || (rc != NO_ERROR)) {
+                       ALOGD("%s: signal API result, m_state = %d",
+                             __func__, m_state);
+                       result.status = rc;
+                       result.request_api = evt;
+                       result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+                       m_parent->signalAPIResult(&result);
+                   }
+               } else {
+                   m_state = QCAMERA_SM_STATE_PIC_TAKING;
+                   rc = m_parent->takePicture();
+                   if (rc != NO_ERROR) {
+                       // move state to preview stopped state
+                       m_state = QCAMERA_SM_STATE_PREVIEW_STOPPED;
+                   }
+
+                   result.status = rc;
+                   result.request_api = evt;
+                   result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+                   m_parent->signalAPIResult(&result);
+               }
+           } else {
+               m_state = QCAMERA_SM_STATE_PREVIEW_PIC_TAKING;
+               rc = m_parent->takeLiveSnapshot();
+               if (rc != NO_ERROR ) {
+                   m_state = QCAMERA_SM_STATE_PREVIEWING;
+               }
+               result.status = rc;
+               result.request_api = evt;
+               result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+               m_parent->signalAPIResult(&result);
+           }
+        }
+        break;
+    case QCAMERA_SM_EVT_SEND_COMMAND:
+        {
+            qcamera_sm_evt_command_payload_t *cmd_payload =
+                (qcamera_sm_evt_command_payload_t *)payload;
+            rc = m_parent->sendCommand(cmd_payload->cmd,
+                                       cmd_payload->arg1,
+                                       cmd_payload->arg2);
+#ifndef VANILLA_HAL
+            if (CAMERA_CMD_LONGSHOT_ON == cmd_payload->cmd) {
+                if (QCAMERA_SM_EVT_RESTART_PERVIEW == cmd_payload->arg1) {
+                    m_parent->stopPreview();
+                    // Clear memory pools
+                    m_parent->m_memoryPool.clear();
+                    // start preview again
+                    rc = m_parent->preparePreview();
+                    if (rc == NO_ERROR) {
+                        applyDelayedMsgs();
+                        rc = m_parent->startPreview();
+                        if (rc != NO_ERROR) {
+                            m_parent->unpreparePreview();
+                        }
+                    }
+                }
+            }
+#endif
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_REG_FACE_IMAGE:
+        {
+            int32_t faceID = 0;
+            qcamera_sm_evt_reg_face_payload_t *reg_payload =
+                (qcamera_sm_evt_reg_face_payload_t *)payload;
+            rc = m_parent->registerFaceImage(reg_payload->img_ptr,
+                                             reg_payload->config,
+                                             faceID);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_HANDLE;
+            result.handle = faceID;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_CANCEL_PICTURE:
+    case QCAMERA_SM_EVT_STOP_RECORDING:
+    case QCAMERA_SM_EVT_RELEASE_RECORIDNG_FRAME:
+    case QCAMERA_SM_EVT_RELEASE:
+        {
+            ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+            rc = INVALID_OPERATION;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_EVT_INTERNAL:
+        {
+            qcamera_sm_internal_evt_payload_t *internal_evt =
+                (qcamera_sm_internal_evt_payload_t *)payload;
+            switch (internal_evt->evt_type) {
+            case QCAMERA_INTERNAL_EVT_FOCUS_UPDATE:
+                rc = m_parent->processAutoFocusEvent(internal_evt->focus_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_PREP_SNAPSHOT_DONE:
+                break;
+            case QCAMERA_INTERNAL_EVT_FACE_DETECT_RESULT:
+                rc = m_parent->processFaceDetectionResult(&internal_evt->faces_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_HISTOGRAM_STATS:
+                rc = m_parent->processHistogramStats(internal_evt->stats_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_CROP_INFO:
+                rc = m_parent->processZoomEvent(internal_evt->crop_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_ASD_UPDATE:
+                rc = m_parent->processASDUpdate(internal_evt->asd_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_LED_MODE_OVERRIDE:
+                rc = m_parent->mParameters.updateFlashMode(internal_evt->led_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_AWB_UPDATE:
+                rc = m_parent->transAwbMetaToParams(internal_evt->awb_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_AE_UPDATE:
+                rc = m_parent->processAEInfo(internal_evt->ae_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_FOCUS_POS_UPDATE:
+                rc = m_parent->processFocusPositionInfo(internal_evt->focus_pos);
+                break;
+            case QCAMERA_INTERNAL_EVT_HDR_UPDATE:
+                rc = m_parent->processHDRData(internal_evt->hdr_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_RETRO_AEC_UNLOCK:
+                rc = m_parent->processRetroAECUnlock();
+                break;
+            case QCAMERA_INTERNAL_EVT_ZSL_CAPTURE_DONE:
+                rc = m_parent->processZSLCaptureDone();
+                break;
+            default:
+                ALOGE("%s: Invalid internal event %d in state(%d)",
+                            __func__, internal_evt->evt_type, m_state);
+                break;
+            }
+        }
+        break;
+    case QCAMERA_SM_EVT_EVT_NOTIFY:
+        {
+            mm_camera_event_t *cam_evt = (mm_camera_event_t *)payload;
+            switch (cam_evt->server_event_type) {
+            case CAM_EVENT_TYPE_DAEMON_DIED:
+                {
+                    //close the camera backend
+                    mm_camera_vtbl_t* handle = m_parent->mCameraHandle;
+                    if (handle && handle->ops) {
+                        handle->ops->error_close_camera(handle->camera_handle);
+                    } else {
+                        ALOGE("%s: Could not close because the handle or ops is NULL",
+                                __func__);
+                    }
+                    m_parent->sendEvtNotify(CAMERA_MSG_ERROR,
+                                            CAMERA_ERROR_SERVER_DIED,
+                                            0);
+                }
+                break;
+            default:
+                CDBG_HIGH("%s: no handling for server evt (%d) at this state",
+                      __func__, cam_evt->server_event_type);
+                break;
+            }
+        }
+        break;
+    case QCAMERA_SM_EVT_THERMAL_NOTIFY:
+        {
+            rc = m_parent->updateThermalLevel(payload);
+        }
+        break;
+    case QCAMERA_SM_EVT_SNAPSHOT_DONE:
+        {
+            // No ops, but need to notify
+            ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalEvtResult(&result);
+        }
+       break;
+    case QCAMERA_SM_EVT_JPEG_EVT_NOTIFY:
+    default:
+        ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+        break;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : procEvtPrepareSnapshotState
+ *
+ * DESCRIPTION: finite state machine function to handle event in state of
+ *              QCAMERA_SM_STATE_PREPARE_SNAPSHOT.
+ *
+ * PARAMETERS :
+ *   @evt      : event to be processed
+ *   @payload  : event payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStateMachine::procEvtPrepareSnapshotState(qcamera_sm_evt_enum_t evt,
+                                                    void *payload)
+{
+    int32_t rc = NO_ERROR;
+    qcamera_api_result_t result;
+    memset(&result, 0, sizeof(qcamera_api_result_t));
+
+    ALOGV("%s: event (%d)", __func__, evt);
+    switch (evt) {
+    case QCAMERA_SM_EVT_SET_PREVIEW_WINDOW:
+    case QCAMERA_SM_EVT_SET_CALLBACKS:
+    case QCAMERA_SM_EVT_ENABLE_MSG_TYPE:
+    case QCAMERA_SM_EVT_DISABLE_MSG_TYPE:
+    case QCAMERA_SM_EVT_MSG_TYPE_ENABLED:
+    case QCAMERA_SM_EVT_SET_PARAMS:
+    case QCAMERA_SM_EVT_GET_PARAMS:
+    case QCAMERA_SM_EVT_PUT_PARAMS:
+    case QCAMERA_SM_EVT_START_PREVIEW:
+    case QCAMERA_SM_EVT_START_NODISPLAY_PREVIEW:
+    case QCAMERA_SM_EVT_STOP_PREVIEW:
+    case QCAMERA_SM_EVT_PREVIEW_ENABLED:
+    case QCAMERA_SM_EVT_RECORDING_ENABLED:
+    case QCAMERA_SM_EVT_STORE_METADATA_IN_BUFS:
+    case QCAMERA_SM_EVT_DUMP:
+    case QCAMERA_SM_EVT_START_AUTO_FOCUS:
+    case QCAMERA_SM_EVT_STOP_AUTO_FOCUS:
+    case QCAMERA_SM_EVT_START_RECORDING:
+    case QCAMERA_SM_EVT_TAKE_PICTURE:
+    case QCAMERA_SM_EVT_PREPARE_SNAPSHOT:
+    case QCAMERA_SM_EVT_SEND_COMMAND:
+    case QCAMERA_SM_EVT_CANCEL_PICTURE:
+    case QCAMERA_SM_EVT_STOP_RECORDING:
+    case QCAMERA_SM_EVT_RELEASE_RECORIDNG_FRAME:
+    case QCAMERA_SM_EVT_RELEASE:
+        {
+            ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+            rc = INVALID_OPERATION;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_EVT_INTERNAL:
+        {
+            qcamera_sm_internal_evt_payload_t *internal_evt =
+                (qcamera_sm_internal_evt_payload_t *)payload;
+            switch (internal_evt->evt_type) {
+            case QCAMERA_INTERNAL_EVT_FOCUS_UPDATE:
+                rc = m_parent->processAutoFocusEvent(internal_evt->focus_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_PREP_SNAPSHOT_DONE:
+                CDBG("%s: Received QCAMERA_INTERNAL_EVT_PREP_SNAPSHOT_DONE event",
+                    __func__);
+                m_parent->processPrepSnapshotDoneEvent(internal_evt->prep_snapshot_state);
+                m_state = QCAMERA_SM_STATE_PREVIEWING;
+
+                result.status = NO_ERROR;
+                result.request_api = QCAMERA_SM_EVT_PREPARE_SNAPSHOT;
+                result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+                m_parent->signalAPIResult(&result);
+                break;
+            case QCAMERA_INTERNAL_EVT_FACE_DETECT_RESULT:
+                rc = m_parent->processFaceDetectionResult(&internal_evt->faces_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_HISTOGRAM_STATS:
+                rc = m_parent->processHistogramStats(internal_evt->stats_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_CROP_INFO:
+                rc = m_parent->processZoomEvent(internal_evt->crop_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_ASD_UPDATE:
+                rc = m_parent->processASDUpdate(internal_evt->asd_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_LED_MODE_OVERRIDE:
+                ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+                break;
+            case QCAMERA_INTERNAL_EVT_AWB_UPDATE:
+                rc = m_parent->transAwbMetaToParams(internal_evt->awb_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_AE_UPDATE:
+                rc = m_parent->processAEInfo(internal_evt->ae_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_FOCUS_POS_UPDATE:
+                rc = m_parent->processFocusPositionInfo(internal_evt->focus_pos);
+                break;
+            case QCAMERA_INTERNAL_EVT_HDR_UPDATE:
+                rc = m_parent->processHDRData(internal_evt->hdr_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_RETRO_AEC_UNLOCK:
+                rc = m_parent->processRetroAECUnlock();
+                break;
+            case QCAMERA_INTERNAL_EVT_ZSL_CAPTURE_DONE:
+                rc = m_parent->processZSLCaptureDone();
+                break;
+            default:
+                ALOGE("%s: Invalid internal event %d in state(%d)",
+                            __func__, internal_evt->evt_type, m_state);
+                break;
+            }
+        }
+        break;
+    case QCAMERA_SM_EVT_EVT_NOTIFY:
+        {
+            mm_camera_event_t *cam_evt = (mm_camera_event_t *)payload;
+            switch (cam_evt->server_event_type) {
+            case CAM_EVENT_TYPE_DAEMON_DIED:
+                {
+                    //close the camera backend
+                    mm_camera_vtbl_t* handle = m_parent->mCameraHandle;
+                    if (handle && handle->ops) {
+                        handle->ops->error_close_camera(handle->camera_handle);
+                    } else {
+                        ALOGE("%s: Could not close because the handle or ops is NULL",
+                                __func__);
+                    }
+                    m_parent->sendEvtNotify(CAMERA_MSG_ERROR,
+                                            CAMERA_ERROR_SERVER_DIED,
+                                            0);
+                }
+                break;
+            default:
+                ALOGE("%s: Invalid internal event %d in state(%d)",
+                            __func__, cam_evt->server_event_type, m_state);
+                break;
+            }
+        }
+        break;
+    case QCAMERA_SM_EVT_SNAPSHOT_DONE:
+        {
+            // No ops, but need to notify
+            ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalEvtResult(&result);
+        }
+       break;
+    case QCAMERA_SM_EVT_THERMAL_NOTIFY:
+        {
+            rc = m_parent->updateThermalLevel(payload);
+        }
+        break;
+    case QCAMERA_SM_EVT_JPEG_EVT_NOTIFY:
+    default:
+        ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+        break;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : procEvtPicTakingState
+ *
+ * DESCRIPTION: finite state machine function to handle event in state of
+ *              QCAMERA_SM_STATE_PIC_TAKING.
+ *
+ * PARAMETERS :
+ *   @evt      : event to be processed
+ *   @payload  : event payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStateMachine::procEvtPicTakingState(qcamera_sm_evt_enum_t evt,
+                                                   void *payload)
+{
+    int32_t rc = NO_ERROR;
+    qcamera_api_result_t result;
+    memset(&result, 0, sizeof(qcamera_api_result_t));
+
+    ALOGV("%s: event (%d)", __func__, evt);
+    switch (evt) {
+    case QCAMERA_SM_EVT_SET_PREVIEW_WINDOW:
+        {
+            // Cannot set the preview window while a picture is being taken
+            ALOGE("Error!! cannot set preview window when preview is running");
+            rc = INVALID_OPERATION;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_SET_CALLBACKS:
+        {
+            qcamera_sm_evt_setcb_payload_t *setcbs =
+                (qcamera_sm_evt_setcb_payload_t *)payload;
+            rc = m_parent->setCallBacks(setcbs->notify_cb,
+                                        setcbs->data_cb,
+                                        setcbs->data_cb_timestamp,
+                                        setcbs->get_memory,
+                                        setcbs->user);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_ENABLE_MSG_TYPE:
+        {
+            rc = m_parent->enableMsgType(*((int32_t *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_DISABLE_MSG_TYPE:
+        {
+            rc = m_parent->disableMsgType(*((int32_t *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_MSG_TYPE_ENABLED:
+        {
+            int enabled = m_parent->msgTypeEnabled(*((int32_t *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_ENABLE_FLAG;
+            result.enabled = enabled;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_SET_PARAMS:
+        {
+            bool needRestart = false;
+            rc = m_parent->updateParameters((char*)payload, needRestart);
+            if (rc == NO_ERROR) {
+                rc = m_parent->commitParameterChanges();
+            }
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_GET_PARAMS:
+        {
+            result.params = m_parent->getParameters();
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_PARAMS;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_PUT_PARAMS:
+        {
+            rc = m_parent->putParameters((char*)payload);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STOP_PREVIEW:
+        {
+            // cancel picture first
+            rc = m_parent->cancelPicture();
+            m_state = QCAMERA_SM_STATE_PREVIEW_STOPPED;
+
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_PREVIEW_ENABLED:
+        {
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_ENABLE_FLAG;
+            result.enabled = 0;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_RECORDING_ENABLED:
+        {
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_ENABLE_FLAG;
+            result.enabled = 0;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STORE_METADATA_IN_BUFS:
+        {
+            rc = m_parent->storeMetaDataInBuffers(*((int *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_DUMP:
+        {
+            rc = m_parent->dump(*((int *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_START_AUTO_FOCUS:
+        {
+            rc = m_parent->autoFocus();
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STOP_AUTO_FOCUS:
+        {
+            rc = m_parent->cancelAutoFocus();
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_SEND_COMMAND:
+        {
+            qcamera_sm_evt_command_payload_t *cmd_payload =
+                (qcamera_sm_evt_command_payload_t *)payload;
+            rc = m_parent->sendCommand(cmd_payload->cmd,
+                                       cmd_payload->arg1,
+                                       cmd_payload->arg2);
+#ifndef VANILLA_HAL
+            if ( CAMERA_CMD_LONGSHOT_OFF == cmd_payload->cmd ) {
+                // move state to previewing state
+                m_state = QCAMERA_SM_STATE_PREVIEWING;
+            }
+#endif
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_CANCEL_PICTURE:
+        {
+            rc = m_parent->cancelPicture();
+            m_state = QCAMERA_SM_STATE_PREVIEW_STOPPED;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_REG_FACE_IMAGE:
+        {
+            int32_t faceID = 0;
+            qcamera_sm_evt_reg_face_payload_t *reg_payload =
+                (qcamera_sm_evt_reg_face_payload_t *)payload;
+            rc = m_parent->registerFaceImage(reg_payload->img_ptr,
+                                             reg_payload->config,
+                                             faceID);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_HANDLE;
+            result.handle = faceID;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_TAKE_PICTURE:
+        {
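+            // A take_picture while a capture is already in progress is only
+            // valid when longshot is enabled; otherwise reject the request.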
+            if ( m_parent->isLongshotEnabled() ) {
+                rc = m_parent->longShot();
+            } else {
+                ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+                rc = INVALID_OPERATION;
+            }
+
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_PREPARE_SNAPSHOT:
+    case QCAMERA_SM_EVT_START_RECORDING:
+    case QCAMERA_SM_EVT_STOP_RECORDING:
+    case QCAMERA_SM_EVT_RELEASE_RECORIDNG_FRAME:
+    case QCAMERA_SM_EVT_START_PREVIEW:
+    case QCAMERA_SM_EVT_START_NODISPLAY_PREVIEW:
+    case QCAMERA_SM_EVT_RELEASE:
+        {
+            ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+            rc = INVALID_OPERATION;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_EVT_INTERNAL:
+        {
+            qcamera_sm_internal_evt_payload_t *internal_evt =
+                (qcamera_sm_internal_evt_payload_t *)payload;
+            switch (internal_evt->evt_type) {
+            case QCAMERA_INTERNAL_EVT_FOCUS_UPDATE:
+                rc = m_parent->processAutoFocusEvent(internal_evt->focus_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_PREP_SNAPSHOT_DONE:
+                break;
+            case QCAMERA_INTERNAL_EVT_FACE_DETECT_RESULT:
+                break;
+            case QCAMERA_INTERNAL_EVT_HISTOGRAM_STATS:
+                break;
+            case QCAMERA_INTERNAL_EVT_CROP_INFO:
+                rc = m_parent->processZoomEvent(internal_evt->crop_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_ASD_UPDATE:
+                rc = m_parent->processASDUpdate(internal_evt->asd_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_LED_MODE_OVERRIDE:
+                ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+                break;
+            case QCAMERA_INTERNAL_EVT_AWB_UPDATE:
+                rc = m_parent->transAwbMetaToParams(internal_evt->awb_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_AE_UPDATE:
+                rc = m_parent->processAEInfo(internal_evt->ae_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_FOCUS_POS_UPDATE:
+                rc = m_parent->processFocusPositionInfo(internal_evt->focus_pos);
+                break;
+            case QCAMERA_INTERNAL_EVT_HDR_UPDATE:
+                rc = m_parent->processHDRData(internal_evt->hdr_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_RETRO_AEC_UNLOCK:
+                rc = m_parent->processRetroAECUnlock();
+                break;
+            case QCAMERA_INTERNAL_EVT_ZSL_CAPTURE_DONE:
+                rc = m_parent->processZSLCaptureDone();
+                break;
+            default:
+                break;
+            }
+        }
+        break;
+    case QCAMERA_SM_EVT_EVT_NOTIFY:
+        {
+            mm_camera_event_t *cam_evt = (mm_camera_event_t *)payload;
+            switch (cam_evt->server_event_type) {
+            case CAM_EVENT_TYPE_DAEMON_DIED:
+                {
+                    // Send internal events to release the state machine
+                    // thread to process the CAMERA_ERROR_SERVER_DIED error
+                    result.status = rc;
+                    result.request_api = QCAMERA_SM_EVT_PREPARE_SNAPSHOT;
+                    result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+                    m_parent->signalAPIResult(&result);
+
+                    result.status = rc;
+                    result.request_api = QCAMERA_SM_EVT_TAKE_PICTURE;
+                    result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+                    m_parent->signalAPIResult(&result);
+
+                    //close the camera backend
+                    mm_camera_vtbl_t* handle = m_parent->mCameraHandle;
+                    if (handle && handle->ops) {
+                        handle->ops->error_close_camera(handle->camera_handle);
+                    } else {
+                        ALOGE("%s: Could not close because the handle or ops is NULL",
+                                __func__);
+                    }
+                    m_parent->sendEvtNotify(CAMERA_MSG_ERROR,
+                                            CAMERA_ERROR_SERVER_DIED,
+                                            0);
+                }
+                break;
+            default:
+                CDBG_HIGH("%s: no handling for server evt (%d) at this state",
+                      __func__, cam_evt->server_event_type);
+                break;
+            }
+        }
+        break;
+    case QCAMERA_SM_EVT_JPEG_EVT_NOTIFY:
+        {
+            qcamera_jpeg_evt_payload_t *jpeg_job =
+                (qcamera_jpeg_evt_payload_t *)payload;
+            rc = m_parent->processJpegNotify(jpeg_job);
+        }
+        break;
+    case QCAMERA_SM_EVT_STOP_CAPTURE_CHANNEL:
+        {
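+            // Stop the capture channel; if preview restart is enabled and the
+            // channel stopped cleanly, prepare and start preview right away.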
+            bool restartPreview = m_parent->isPreviewRestartEnabled();
+            rc = m_parent->stopCaptureChannel(restartPreview);
+
+            if (restartPreview && (NO_ERROR == rc)) {
+                rc = m_parent->preparePreview();
+                if (NO_ERROR == rc) {
+                    m_parent->m_bPreviewStarted = true;
+                    applyDelayedMsgs();
+                    rc = m_parent->startPreview();
+                }
+            }
+
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_SNAPSHOT_DONE:
+        {
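+            // Capture is complete: cancel the picture and either return to
+            // previewing (when preview restart is enabled) or stay stopped.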
+            rc = m_parent->cancelPicture();
+
+            bool restartPreview = m_parent->isPreviewRestartEnabled();
+            if (restartPreview) {
+                m_state = QCAMERA_SM_STATE_PREVIEWING;
+            } else {
+                m_state = QCAMERA_SM_STATE_PREVIEW_STOPPED;
+            }
+
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalEvtResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_THERMAL_NOTIFY:
+        {
+            rc = m_parent->updateThermalLevel(payload);
+        }
+        break;
+    default:
+        ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+        break;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : procEvtRecordingState
+ *
+ * DESCRIPTION: finite state machine function to handle event in state of
+ *              QCAMERA_SM_STATE_RECORDING.
+ *
+ * PARAMETERS :
+ *   @evt      : event to be processed
+ *   @payload  : event payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStateMachine::procEvtRecordingState(qcamera_sm_evt_enum_t evt,
+                                                   void *payload)
+{
+    int32_t rc = NO_ERROR;
+    qcamera_api_result_t result;
+    memset(&result, 0, sizeof(qcamera_api_result_t));
+
+    ALOGV("%s: event (%d)", __func__, evt);
+    switch (evt) {
+    case QCAMERA_SM_EVT_START_PREVIEW:
+    case QCAMERA_SM_EVT_SET_PREVIEW_WINDOW:
+        {
+            // Workaround: the CTS VideoSnapshot test may try to
+            //     start preview during video recording.
+            CDBG_HIGH("CTS video restart op");
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_SET_CALLBACKS:
+        {
+            qcamera_sm_evt_setcb_payload_t *setcbs =
+                (qcamera_sm_evt_setcb_payload_t *)payload;
+            rc = m_parent->setCallBacks(setcbs->notify_cb,
+                                        setcbs->data_cb,
+                                        setcbs->data_cb_timestamp,
+                                        setcbs->get_memory,
+                                        setcbs->user);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_ENABLE_MSG_TYPE:
+        {
+            rc = m_parent->enableMsgType(*((int32_t *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_DISABLE_MSG_TYPE:
+        {
+            rc = m_parent->disableMsgType(*((int32_t *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_MSG_TYPE_ENABLED:
+        {
+            int enabled = m_parent->msgTypeEnabled(*((int32_t *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_ENABLE_FLAG;
+            result.enabled = enabled;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_SET_PARAMS:
+        {
+            bool needRestart = false;
+            rc = m_parent->updateParameters((char*)payload, needRestart);
+            if (rc == NO_ERROR) {
+                if (needRestart) {
+                    // cannot set parameters that require restart during recording
+                    ALOGE("%s: Error!! cannot set parameters that require restart during recording",
+                          __func__);
+                    rc = BAD_VALUE;
+                } else {
+                    rc = m_parent->commitParameterChanges();
+                }
+            }
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_GET_PARAMS:
+        {
+            result.params = m_parent->getParameters();
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_PARAMS;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_PUT_PARAMS:
+        {
+            rc = m_parent->putParameters((char*)payload);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_PREVIEW_ENABLED:
+        {
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_ENABLE_FLAG;
+            result.enabled = 0;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_RECORDING_ENABLED:
+        {
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_ENABLE_FLAG;
+            result.enabled = 1;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STORE_METADATA_IN_BUFS:
+        {
+            rc = m_parent->storeMetaDataInBuffers(*((int *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_DUMP:
+        {
+            rc = m_parent->dump(*((int *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_START_AUTO_FOCUS:
+        {
+            rc = m_parent->autoFocus();
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STOP_AUTO_FOCUS:
+        {
+            rc = m_parent->cancelAutoFocus();
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_SEND_COMMAND:
+        {
+            qcamera_sm_evt_command_payload_t *cmd_payload =
+                (qcamera_sm_evt_command_payload_t *)payload;
+            rc = m_parent->sendCommand(cmd_payload->cmd,
+                                       cmd_payload->arg1,
+                                       cmd_payload->arg2);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_TAKE_PICTURE:
+        {
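+            // Live snapshot during recording: move to VIDEO_PIC_TAKING first,
+            // then fall back to RECORDING if takeLiveSnapshot() fails.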
+            m_state = QCAMERA_SM_STATE_VIDEO_PIC_TAKING;
+            rc = m_parent->takeLiveSnapshot();
+            if (rc != NO_ERROR) {
+                m_state = QCAMERA_SM_STATE_RECORDING;
+            }
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_START_RECORDING:
+        {
+            // no ops here
+            CDBG_HIGH("%s: already in recording state, no ops for start_recording", __func__);
+            rc = 0;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STOP_RECORDING:
+        {
+            rc = m_parent->stopRecording();
+            m_state = QCAMERA_SM_STATE_PREVIEWING;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STOP_PREVIEW:
+        {
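+            // Stopping preview while recording implicitly stops recording
+            // first, then tears down the preview itself.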
+            rc = m_parent->stopRecording();
+            m_state = QCAMERA_SM_STATE_PREVIEWING;
+
+            rc = m_parent->stopPreview();
+            m_state = QCAMERA_SM_STATE_PREVIEW_STOPPED;
+
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_RELEASE_RECORIDNG_FRAME:
+        {
+            rc = m_parent->releaseRecordingFrame((const void *)payload);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_REG_FACE_IMAGE:
+        {
+            int32_t faceID = 0;
+            qcamera_sm_evt_reg_face_payload_t *reg_payload =
+                (qcamera_sm_evt_reg_face_payload_t *)payload;
+            rc = m_parent->registerFaceImage(reg_payload->img_ptr,
+                                             reg_payload->config,
+                                             faceID);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_HANDLE;
+            result.handle = faceID;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_PREPARE_SNAPSHOT:
+        {
+            // In video snapshot mode, prepare hardware is a no-op.
+            result.status = NO_ERROR;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_CANCEL_PICTURE:
+    case QCAMERA_SM_EVT_START_NODISPLAY_PREVIEW:
+    case QCAMERA_SM_EVT_RELEASE:
+        {
+            ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+            rc = INVALID_OPERATION;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_EVT_INTERNAL:
+        {
+            qcamera_sm_internal_evt_payload_t *internal_evt =
+                (qcamera_sm_internal_evt_payload_t *)payload;
+            switch (internal_evt->evt_type) {
+            case QCAMERA_INTERNAL_EVT_FOCUS_UPDATE:
+                rc = m_parent->processAutoFocusEvent(internal_evt->focus_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_PREP_SNAPSHOT_DONE:
+                break;
+            case QCAMERA_INTERNAL_EVT_FACE_DETECT_RESULT:
+                rc = m_parent->processFaceDetectionResult(&internal_evt->faces_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_HISTOGRAM_STATS:
+                rc = m_parent->processHistogramStats(internal_evt->stats_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_CROP_INFO:
+                rc = m_parent->processZoomEvent(internal_evt->crop_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_ASD_UPDATE:
+                rc = m_parent->processASDUpdate(internal_evt->asd_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_LED_MODE_OVERRIDE:
+                ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+                break;
+            case QCAMERA_INTERNAL_EVT_AWB_UPDATE:
+                rc = m_parent->transAwbMetaToParams(internal_evt->awb_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_AE_UPDATE:
+                rc = m_parent->processAEInfo(internal_evt->ae_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_FOCUS_POS_UPDATE:
+                rc = m_parent->processFocusPositionInfo(internal_evt->focus_pos);
+                break;
+            case QCAMERA_INTERNAL_EVT_HDR_UPDATE:
+                rc = m_parent->processHDRData(internal_evt->hdr_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_RETRO_AEC_UNLOCK:
+                rc = m_parent->processRetroAECUnlock();
+                break;
+            case QCAMERA_INTERNAL_EVT_ZSL_CAPTURE_DONE:
+                rc = m_parent->processZSLCaptureDone();
+                break;
+            default:
+                break;
+            }
+        }
+        break;
+    case QCAMERA_SM_EVT_EVT_NOTIFY:
+        {
+            mm_camera_event_t *cam_evt = (mm_camera_event_t *)payload;
+            switch (cam_evt->server_event_type) {
+            case CAM_EVENT_TYPE_DAEMON_DIED:
+                {
+                    //close the camera backend
+                    mm_camera_vtbl_t* handle = m_parent->mCameraHandle;
+                    if (handle && handle->ops) {
+                        handle->ops->error_close_camera(handle->camera_handle);
+                    } else {
+                        ALOGE("%s: Could not close because the handle or ops is NULL",
+                                __func__);
+                    }
+                    m_parent->sendEvtNotify(CAMERA_MSG_ERROR,
+                                            CAMERA_ERROR_SERVER_DIED,
+                                            0);
+                }
+                break;
+            default:
+                ALOGE("%s: Invalid server event %d in state(%d)",
+                            __func__, cam_evt->server_event_type, m_state);
+                break;
+            }
+        }
+        break;
+    case QCAMERA_SM_EVT_THERMAL_NOTIFY:
+        {
+            rc = m_parent->updateThermalLevel(payload);
+        }
+        break;
+    case QCAMERA_SM_EVT_SNAPSHOT_DONE:
+        {
+            // Not expected in this state, but the event thread still needs to be signalled
+            ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalEvtResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_JPEG_EVT_NOTIFY:
+    default:
+        ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+        break;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : procEvtVideoPicTakingState
+ *
+ * DESCRIPTION: finite state machine function to handle event in state of
+ *              QCAMERA_SM_STATE_VIDEO_PIC_TAKING.
+ *
+ * PARAMETERS :
+ *   @evt      : event to be processed
+ *   @payload  : event payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStateMachine::procEvtVideoPicTakingState(qcamera_sm_evt_enum_t evt,
+                                                        void *payload)
+{
+    int32_t rc = NO_ERROR;
+    qcamera_api_result_t result;
+    memset(&result, 0, sizeof(qcamera_api_result_t));
+
+    ALOGV("%s: event (%d)", __func__, evt);
+    switch (evt) {
+    case QCAMERA_SM_EVT_SET_PREVIEW_WINDOW:
+        {
+            // Error setting preview window during previewing
+            ALOGE("Error!! cannot set preview window when preview is running");
+            rc = INVALID_OPERATION;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_SET_CALLBACKS:
+        {
+            qcamera_sm_evt_setcb_payload_t *setcbs =
+                (qcamera_sm_evt_setcb_payload_t *)payload;
+            rc = m_parent->setCallBacks(setcbs->notify_cb,
+                                        setcbs->data_cb,
+                                        setcbs->data_cb_timestamp,
+                                        setcbs->get_memory,
+                                        setcbs->user);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_ENABLE_MSG_TYPE:
+        {
+            rc = m_parent->enableMsgType(*((int32_t *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_DISABLE_MSG_TYPE:
+        {
+            rc = m_parent->disableMsgType(*((int32_t *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_MSG_TYPE_ENABLED:
+        {
+            int enabled = m_parent->msgTypeEnabled(*((int32_t *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_ENABLE_FLAG;
+            result.enabled = enabled;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_SET_PARAMS:
+        {
+            bool needRestart = false;
+            rc = m_parent->updateParameters((char*)payload, needRestart);
+            if (rc == NO_ERROR) {
+                if (needRestart) {
+                    // cannot set parameters that require restart during recording
+                    ALOGE("%s: Error!! cannot set parameters that require restart during recording",
+                          __func__);
+                    rc = BAD_VALUE;
+                } else {
+                    rc = m_parent->commitParameterChanges();
+                }
+            }
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_GET_PARAMS:
+        {
+            result.params = m_parent->getParameters();
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_PARAMS;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_PUT_PARAMS:
+        {
+            rc = m_parent->putParameters((char*)payload);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_PREVIEW_ENABLED:
+        {
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_ENABLE_FLAG;
+            result.enabled = 1;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_RECORDING_ENABLED:
+        {
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_ENABLE_FLAG;
+            result.enabled = 1;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STORE_METADATA_IN_BUFS:
+        {
+            rc = m_parent->storeMetaDataInBuffers(*((int *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_DUMP:
+        {
+            rc = m_parent->dump(*((int *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_START_AUTO_FOCUS:
+        {
+            rc = m_parent->autoFocus();
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STOP_AUTO_FOCUS:
+        {
+            rc = m_parent->cancelAutoFocus();
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_SEND_COMMAND:
+        {
+            qcamera_sm_evt_command_payload_t *cmd_payload =
+                (qcamera_sm_evt_command_payload_t *)payload;
+            rc = m_parent->sendCommand(cmd_payload->cmd,
+                                       cmd_payload->arg1,
+                                       cmd_payload->arg2);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STOP_RECORDING:
+        {
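+            // Cancel the in-flight live snapshot before stopping recording;
+            // the state unwinds VIDEO_PIC_TAKING -> RECORDING -> PREVIEWING.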
+            rc = m_parent->cancelLiveSnapshot();
+            m_state = QCAMERA_SM_STATE_RECORDING;
+
+            rc = m_parent->stopRecording();
+            m_state = QCAMERA_SM_STATE_PREVIEWING;
+
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_RELEASE_RECORIDNG_FRAME:
+        {
+            rc = m_parent->releaseRecordingFrame((const void *)payload);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_CANCEL_PICTURE:
+        {
+            rc = m_parent->cancelLiveSnapshot();
+            m_state = QCAMERA_SM_STATE_RECORDING;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_REG_FACE_IMAGE:
+        {
+            int32_t faceID = 0;
+            qcamera_sm_evt_reg_face_payload_t *reg_payload =
+                (qcamera_sm_evt_reg_face_payload_t *)payload;
+            rc = m_parent->registerFaceImage(reg_payload->img_ptr,
+                                             reg_payload->config,
+                                             faceID);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_HANDLE;
+            result.handle = faceID;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STOP_PREVIEW:
+        {
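+            // Unwind in order: cancel the live snapshot, stop recording, then
+            // stop preview, updating the state after each step.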
+            rc = m_parent->cancelLiveSnapshot();
+            m_state = QCAMERA_SM_STATE_RECORDING;
+
+            rc = m_parent->stopRecording();
+            m_state = QCAMERA_SM_STATE_PREVIEWING;
+
+            rc = m_parent->stopPreview();
+            m_state = QCAMERA_SM_STATE_PREVIEW_STOPPED;
+
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_START_RECORDING:
+    case QCAMERA_SM_EVT_START_PREVIEW:
+    case QCAMERA_SM_EVT_START_NODISPLAY_PREVIEW:
+    case QCAMERA_SM_EVT_PREPARE_SNAPSHOT:
+    case QCAMERA_SM_EVT_TAKE_PICTURE:
+    case QCAMERA_SM_EVT_RELEASE:
+        {
+            ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+            rc = INVALID_OPERATION;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_EVT_INTERNAL:
+        {
+            qcamera_sm_internal_evt_payload_t *internal_evt =
+                (qcamera_sm_internal_evt_payload_t *)payload;
+            switch (internal_evt->evt_type) {
+            case QCAMERA_INTERNAL_EVT_FOCUS_UPDATE:
+                rc = m_parent->processAutoFocusEvent(internal_evt->focus_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_PREP_SNAPSHOT_DONE:
+                break;
+            case QCAMERA_INTERNAL_EVT_FACE_DETECT_RESULT:
+                rc = m_parent->processFaceDetectionResult(&internal_evt->faces_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_HISTOGRAM_STATS:
+                rc = m_parent->processHistogramStats(internal_evt->stats_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_CROP_INFO:
+                rc = m_parent->processZoomEvent(internal_evt->crop_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_ASD_UPDATE:
+                rc = m_parent->processASDUpdate(internal_evt->asd_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_LED_MODE_OVERRIDE:
+                ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+                break;
+            case QCAMERA_INTERNAL_EVT_AWB_UPDATE:
+                rc = m_parent->transAwbMetaToParams(internal_evt->awb_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_AE_UPDATE:
+                rc = m_parent->processAEInfo(internal_evt->ae_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_FOCUS_POS_UPDATE:
+                rc = m_parent->processFocusPositionInfo(internal_evt->focus_pos);
+                break;
+            case QCAMERA_INTERNAL_EVT_HDR_UPDATE:
+                rc = m_parent->processHDRData(internal_evt->hdr_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_RETRO_AEC_UNLOCK:
+                rc = m_parent->processRetroAECUnlock();
+                break;
+            case QCAMERA_INTERNAL_EVT_ZSL_CAPTURE_DONE:
+                rc = m_parent->processZSLCaptureDone();
+                break;
+            default:
+                break;
+            }
+        }
+        break;
+    case QCAMERA_SM_EVT_EVT_NOTIFY:
+        {
+            mm_camera_event_t *cam_evt = (mm_camera_event_t *)payload;
+            switch (cam_evt->server_event_type) {
+            case CAM_EVENT_TYPE_DAEMON_DIED:
+                {
+                    //close the camera backend
+                    mm_camera_vtbl_t* handle = m_parent->mCameraHandle;
+                    if (handle && handle->ops) {
+                        handle->ops->error_close_camera(handle->camera_handle);
+                    } else {
+                        ALOGE("%s: Could not close because the handle or ops is NULL",
+                                __func__);
+                    }
+                    m_parent->sendEvtNotify(CAMERA_MSG_ERROR,
+                                            CAMERA_ERROR_SERVER_DIED,
+                                            0);
+                }
+                break;
+            default:
+                ALOGE("%s: Invalid server event %d in state(%d)",
+                            __func__, cam_evt->server_event_type, m_state);
+                break;
+            }
+        }
+        break;
+    case QCAMERA_SM_EVT_JPEG_EVT_NOTIFY:
+        {
+            qcamera_jpeg_evt_payload_t *jpeg_job =
+                (qcamera_jpeg_evt_payload_t *)payload;
+            rc = m_parent->processJpegNotify(jpeg_job);
+        }
+        break;
+    case QCAMERA_SM_EVT_SNAPSHOT_DONE:
+        {
+            rc = m_parent->cancelLiveSnapshot();
+            m_state = QCAMERA_SM_STATE_RECORDING;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalEvtResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_THERMAL_NOTIFY:
+        {
+            rc = m_parent->updateThermalLevel(payload);
+        }
+        break;
+    default:
+        ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+        break;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : procEvtPreviewPicTakingState
+ *
+ * DESCRIPTION: finite state machine function to handle event in state of
+ *              QCAMERA_SM_STATE_PREVIEW_PIC_TAKING.
+ *
+ * PARAMETERS :
+ *   @evt      : event to be processed
+ *   @payload  : event payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStateMachine::procEvtPreviewPicTakingState(qcamera_sm_evt_enum_t evt,
+                                                          void *payload)
+{
+    int32_t rc = NO_ERROR;
+    qcamera_api_result_t result;
+    memset(&result, 0, sizeof(qcamera_api_result_t));
+
+    ALOGV("%s: event (%d)", __func__, evt);
+    switch (evt) {
+    case QCAMERA_SM_EVT_SET_CALLBACKS:
+        {
+            qcamera_sm_evt_setcb_payload_t *setcbs =
+                (qcamera_sm_evt_setcb_payload_t *)payload;
+            rc = m_parent->setCallBacks(setcbs->notify_cb,
+                                        setcbs->data_cb,
+                                        setcbs->data_cb_timestamp,
+                                        setcbs->get_memory,
+                                        setcbs->user);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_ENABLE_MSG_TYPE:
+        {
+            rc = m_parent->enableMsgType(*((int32_t *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_DISABLE_MSG_TYPE:
+        {
+            rc = m_parent->disableMsgType(*((int32_t *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_MSG_TYPE_ENABLED:
+        {
+            int enabled = m_parent->msgTypeEnabled(*((int32_t *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_ENABLE_FLAG;
+            result.enabled = enabled;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_SET_PARAMS:
+        {
+            bool needRestart = false;
+            rc = m_parent->updateParameters((char*)payload, needRestart);
+            if (rc == NO_ERROR) {
+                if (needRestart) {
+                    // need restart preview for parameters to take effect
+                    // stop preview
+                    m_parent->stopPreview();
+                    // Clear memory pools
+                    m_parent->m_memoryPool.clear();
+                    // commit parameter changes to server
+                    m_parent->commitParameterChanges();
+                    // start preview again
+                    rc = m_parent->preparePreview();
+                    if (rc == NO_ERROR) {
+                        applyDelayedMsgs();
+                        rc = m_parent->startPreview();
+                        if (rc != NO_ERROR) {
+                            m_parent->unpreparePreview();
+                        }
+                    }
+                    if (rc != NO_ERROR) {
+                        m_state = QCAMERA_SM_STATE_PIC_TAKING;
+                    }
+                } else {
+                    rc = m_parent->commitParameterChanges();
+                }
+            }
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_GET_PARAMS:
+        {
+            result.params = m_parent->getParameters();
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_PARAMS;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_PUT_PARAMS:
+        {
+            rc = m_parent->putParameters((char*)payload);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_PREVIEW_ENABLED:
+        {
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_ENABLE_FLAG;
+            result.enabled = 1;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_RECORDING_ENABLED:
+        {
+            rc = NO_ERROR;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_ENABLE_FLAG;
+            result.enabled = 0;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STORE_METADATA_IN_BUFS:
+        {
+            rc = m_parent->storeMetaDataInBuffers(*((int *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_DUMP:
+        {
+            rc = m_parent->dump(*((int *)payload));
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_START_AUTO_FOCUS:
+        {
+            rc = m_parent->autoFocus();
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STOP_AUTO_FOCUS:
+        {
+            rc = m_parent->cancelAutoFocus();
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_SEND_COMMAND:
+        {
+            qcamera_sm_evt_command_payload_t *cmd_payload =
+                (qcamera_sm_evt_command_payload_t *)payload;
+            rc = m_parent->sendCommand(cmd_payload->cmd,
+                                       cmd_payload->arg1,
+                                       cmd_payload->arg2);
+#ifndef VANILLA_HAL
+            if ( CAMERA_CMD_LONGSHOT_OFF == cmd_payload->cmd ) {
+                // move state to previewing state
+                m_state = QCAMERA_SM_STATE_PREVIEWING;
+            }
+#endif
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_RELEASE_RECORIDNG_FRAME:
+        {
+            rc = m_parent->releaseRecordingFrame((const void *)payload);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_CANCEL_PICTURE:
+        {
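+            // ZSL and longshot captures are cancelled via cancelPicture();
+            // a regular live snapshot is cancelled via cancelLiveSnapshot().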
+            if (m_parent->isZSLMode() || m_parent->isLongshotEnabled()) {
+                rc = m_parent->cancelPicture();
+            } else {
+                rc = m_parent->cancelLiveSnapshot();
+            }
+            m_state = QCAMERA_SM_STATE_PREVIEWING;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_STOP_PREVIEW:
+        {
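+            // Tear down the active capture path first (ZSL, longshot, or live
+            // snapshot), then unprepare preview before leaving this state.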
+            if (m_parent->isZSLMode()) {
+                // cancel picture first
+                rc = m_parent->cancelPicture();
+                m_parent->stopChannel(QCAMERA_CH_TYPE_ZSL);
+            } else if (m_parent->isLongshotEnabled()) {
+                // just cancel picture
+                rc = m_parent->cancelPicture();
+            } else {
+                rc = m_parent->cancelLiveSnapshot();
+                m_parent->stopChannel(QCAMERA_CH_TYPE_PREVIEW);
+            }
+            // unprepare preview
+            m_parent->unpreparePreview();
+            m_state = QCAMERA_SM_STATE_PREVIEW_STOPPED;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_START_RECORDING:
+        {
+            if (m_parent->isZSLMode()) {
+                ALOGE("%s: Error!! cannot handle evt(%d) in state(%d) in ZSL mode",
+                      __func__, evt, m_state);
+                rc = INVALID_OPERATION;
+            } else if (m_parent->isLongshotEnabled()) {
+                ALOGE("%s: Error!! cannot handle evt(%d) in state(%d) in Longshot mode",
+                      __func__, evt, m_state);
+                rc = INVALID_OPERATION;
+            } else {
+                rc = m_parent->startRecording();
+                if (rc == NO_ERROR) {
+                    m_state = QCAMERA_SM_STATE_VIDEO_PIC_TAKING;
+                }
+            }
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_REG_FACE_IMAGE:
+        {
+            int32_t faceID = 0;
+            qcamera_sm_evt_reg_face_payload_t *reg_payload =
+                (qcamera_sm_evt_reg_face_payload_t *)payload;
+            rc = m_parent->registerFaceImage(reg_payload->img_ptr,
+                                             reg_payload->config,
+                                             faceID);
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_HANDLE;
+            result.handle = faceID;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_TAKE_PICTURE:
+        {
+            if ( m_parent->isLongshotEnabled() ) {
+                rc = m_parent->longShot();
+            } else {
+                ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+                rc = INVALID_OPERATION;
+            }
+
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+
+    case QCAMERA_SM_EVT_PREPARE_SNAPSHOT:
+        {
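+            // Prepare-snapshot is only meaningful here for retro (ZSL) captures;
+            // in any other mode it is rejected as an invalid operation.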
+            ALOGD("%s: [ZSL Retro] Prepare Snapshot", __func__);
+            if (m_parent->isRetroPicture()) {
+                ALOGD("%s: [ZSL Retro] Prepare Snapshot in Retro Mode", __func__);
+                rc = m_parent->prepareHardwareForSnapshot(FALSE);
+                if (rc != NO_ERROR) {
+                    ALOGE("%s: [ZSL Retro] prepareHardwareForSnapshot failed %d",
+                          __func__, rc);
+                    result.status = rc;
+                    result.request_api = evt;
+                    result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+                    m_parent->signalAPIResult(&result);
+                }
+            } else {
+                ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)",
+                      __func__, evt, m_state);
+                rc = INVALID_OPERATION;
+                result.status = rc;
+                result.request_api = evt;
+                result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+                m_parent->signalAPIResult(&result);
+            }
+        }
+        break;
+    case QCAMERA_SM_EVT_STOP_RECORDING:
+    case QCAMERA_SM_EVT_START_PREVIEW:
+    case QCAMERA_SM_EVT_START_NODISPLAY_PREVIEW:
+    case QCAMERA_SM_EVT_SET_PREVIEW_WINDOW:
+    case QCAMERA_SM_EVT_RELEASE:
+        {
+            ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+            rc = INVALID_OPERATION;
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalAPIResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_EVT_INTERNAL:
+        {
+            qcamera_sm_internal_evt_payload_t *internal_evt =
+                (qcamera_sm_internal_evt_payload_t *)payload;
+            switch (internal_evt->evt_type) {
+            case QCAMERA_INTERNAL_EVT_FOCUS_UPDATE:
+                rc = m_parent->processAutoFocusEvent(internal_evt->focus_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_PREP_SNAPSHOT_DONE:
+                ALOGD("%s: [ZSL Retro] Received QCAMERA_INTERNAL_EVT_PREP_SNAPSHOT_DONE event",
+                        __func__);
+                if (m_parent->isRetroPicture()) {
+                    m_parent->processPrepSnapshotDoneEvent(internal_evt->prep_snapshot_state);
+                    ALOGD("%s: [ZSL Retro] Retro picture", __func__);
+                    result.status = NO_ERROR;
+                    result.request_api = QCAMERA_SM_EVT_PREPARE_SNAPSHOT;
+                    result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+                    m_parent->signalAPIResult(&result);
+                }
+                else {
+                    ALOGE("%s: [ZSL Retro] Invalid case for "
+                            "QCAMERA_INTERNAL_EVT_PREP_SNAPSHOT_DONE event", __func__);
+                }
+                break;
+            case QCAMERA_INTERNAL_EVT_FACE_DETECT_RESULT:
+                rc = m_parent->processFaceDetectionResult(&internal_evt->faces_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_READY_FOR_SNAPSHOT:
+                // This is valid only in Retro picture Mode
+                if (m_parent->isRetroPicture()) {
+                    ALOGD("%s: [ZSL Retro] Received QCAMERA_INTERNAL_EVT_READY_FOR_SNAPSHOT event",
+                            __func__);
+                    result.status = NO_ERROR;
+                    result.request_api = QCAMERA_SM_EVT_TAKE_PICTURE;
+                    result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+                    m_parent->signalAPIResult(&result);
+                }
+                else {
+                    ALOGD("%s: [ZSL Retro] Wrong case for "
+                           "QCAMERA_INTERNAL_EVT_READY_FOR_SNAPSHOT event", __func__);
+                }
+                break;
+            case QCAMERA_INTERNAL_EVT_HISTOGRAM_STATS:
+                rc = m_parent->processHistogramStats(internal_evt->stats_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_CROP_INFO:
+                rc = m_parent->processZoomEvent(internal_evt->crop_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_ASD_UPDATE:
+                rc = m_parent->processASDUpdate(internal_evt->asd_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_LED_MODE_OVERRIDE:
+                ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+                break;
+            case QCAMERA_INTERNAL_EVT_AWB_UPDATE:
+                rc = m_parent->transAwbMetaToParams(internal_evt->awb_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_AE_UPDATE:
+                rc = m_parent->processAEInfo(internal_evt->ae_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_FOCUS_POS_UPDATE:
+                rc = m_parent->processFocusPositionInfo(internal_evt->focus_pos);
+                break;
+            case QCAMERA_INTERNAL_EVT_HDR_UPDATE:
+                rc = m_parent->processHDRData(internal_evt->hdr_data);
+                break;
+            case QCAMERA_INTERNAL_EVT_RETRO_AEC_UNLOCK:
+                rc = m_parent->processRetroAECUnlock();
+                break;
+            case QCAMERA_INTERNAL_EVT_ZSL_CAPTURE_DONE:
+                rc = m_parent->processZSLCaptureDone();
+                break;
+            default:
+                break;
+            }
+        }
+        break;
+    case QCAMERA_SM_EVT_EVT_NOTIFY:
+        {
+            mm_camera_event_t *cam_evt = (mm_camera_event_t *)payload;
+            switch (cam_evt->server_event_type) {
+            case CAM_EVENT_TYPE_DAEMON_DIED:
+                {
+                    //close the camera backend
+                    mm_camera_vtbl_t* handle = m_parent->mCameraHandle;
+                    if (handle && handle->ops) {
+                        handle->ops->error_close_camera(handle->camera_handle);
+                    } else {
+                        ALOGE("%s: Could not close because the handle or ops is NULL",
+                                __func__);
+                    }
+                    m_parent->sendEvtNotify(CAMERA_MSG_ERROR,
+                                            CAMERA_ERROR_SERVER_DIED,
+                                            0);
+                }
+                break;
+            default:
+                ALOGE("%s: Invalid internal event %d in state(%d)",
+                            __func__, cam_evt->server_event_type, m_state);
+                break;
+            }
+        }
+        break;
+    case QCAMERA_SM_EVT_JPEG_EVT_NOTIFY:
+        {
+            ALOGV("%s: [ZSL Retro] Calling Process Jpeg Notify",
+            __func__);
+            qcamera_jpeg_evt_payload_t *jpeg_job =
+                (qcamera_jpeg_evt_payload_t *)payload;
+            rc = m_parent->processJpegNotify(jpeg_job);
+        }
+        break;
+    case QCAMERA_SM_EVT_SNAPSHOT_DONE:
+        {
+            ALOGV("%s: [ZSL Retro] Snapshot Done", __func__);
+            if (m_parent->isZSLMode() || m_parent->isLongshotEnabled()) {
+                rc = m_parent->cancelPicture();
+            } else {
+                rc = m_parent->cancelLiveSnapshot();
+            }
+            m_state = QCAMERA_SM_STATE_PREVIEWING;
+            if (m_parent->isRetroPicture()){
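+                // Retro (ZSL retro) capture: the API result queue is signalled
+                // here as well, in addition to the internal event result that is
+                // signalled unconditionally below.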
+                result.status = rc;
+                result.request_api = evt;
+                result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+                ALOGV("\n Signalling for JPEG snapshot done!!");
+                m_parent->signalAPIResult(&result);
+
+            }
+            result.status = rc;
+            result.request_api = evt;
+            result.result_type = QCAMERA_API_RESULT_TYPE_DEF;
+            m_parent->signalEvtResult(&result);
+        }
+        break;
+    case QCAMERA_SM_EVT_THERMAL_NOTIFY:
+        {
+            rc = m_parent->updateThermalLevel(payload);
+        }
+        break;
+    default:
+        ALOGE("%s: Error!! cannot handle evt(%d) in state(%d)", __func__, evt, m_state);
+        break;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : isRecording
+ *
+ * DESCRIPTION: check if recording is in process.
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : true -- recording
+ *              false -- not in recording mode
+ *==========================================================================*/
+bool QCameraStateMachine::isRecording()
+{
+    switch (m_state) {
+    case QCAMERA_SM_STATE_RECORDING:
+    case QCAMERA_SM_STATE_VIDEO_PIC_TAKING:
+        return true;
+    default:
+        return false;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : isPreviewRunning
+ *
+ * DESCRIPTION: check if preview is in process.
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : true -- preview running
+ *              false -- preview stopped
+ *==========================================================================*/
+bool QCameraStateMachine::isPreviewRunning()
+{
+    switch (m_state) {
+    case QCAMERA_SM_STATE_PREVIEWING:
+    case QCAMERA_SM_STATE_RECORDING:
+    case QCAMERA_SM_STATE_VIDEO_PIC_TAKING:
+    case QCAMERA_SM_STATE_PREVIEW_PIC_TAKING:
+    case QCAMERA_SM_STATE_PREPARE_SNAPSHOT:
+    case QCAMERA_SM_STATE_PREVIEW_READY:
+        return true;
+    default:
+        return false;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : isPreviewReady
+ *
+ * DESCRIPTION: check if preview is in ready state.
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : true -- preview is in ready state
+ *              false -- preview is stopped
+ *==========================================================================*/
+bool QCameraStateMachine::isPreviewReady()
+{
+    switch (m_state) {
+    case QCAMERA_SM_STATE_PREVIEW_READY:
+        return true;
+    default:
+        return false;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : isCaptureRunning
+ *
+ * DESCRIPTION: check if image capture is in process.
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : true -- capture running
+ *              false -- capture stopped
+ *==========================================================================*/
+bool QCameraStateMachine::isCaptureRunning()
+{
+    switch (m_state) {
+    case QCAMERA_SM_STATE_PIC_TAKING:
+    case QCAMERA_SM_STATE_VIDEO_PIC_TAKING:
+    case QCAMERA_SM_STATE_PREVIEW_PIC_TAKING:
+        return true;
+    default:
+        return false;
+    }
+}
+/*===========================================================================
+ * FUNCTION   : isNonZSLCaptureRunning
+ *
+ * DESCRIPTION: check if image capture is in process in non ZSL mode.
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : true -- capture running in non-ZSL mode
+ *              false -- either not in capture mode or capture is not in non-ZSL mode
+ *==========================================================================*/
+bool QCameraStateMachine::isNonZSLCaptureRunning()
+{
+    switch (m_state) {
+    case QCAMERA_SM_STATE_PIC_TAKING:
+        return true;
+    default:
+        return false;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : dump
+ *
+ * DESCRIPTION: Composes a string based on current configuration
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : Formatted string
+ *==========================================================================*/
+String8 QCameraStateMachine::dump()
+{
+    String8 str("\n");
+    char s[128];
+
+    snprintf(s, 128, "Is Preview Running: %d\n", isPreviewRunning());
+    str += s;
+
+    snprintf(s, 128, "Is Capture Running: %d\n", isCaptureRunning());
+    str += s;
+
+    snprintf(s, 128, "Is Non ZSL Capture Running: %d\n",
+        isNonZSLCaptureRunning());
+    str += s;
+
+    snprintf(s, 128, "Current State: %d \n", m_state);
+    str += s;
+
+    switch(m_state){
+        case QCAMERA_SM_STATE_PREVIEW_STOPPED:
+        snprintf(s, 128, " QCAMERA_SM_STATE_PREVIEW_STOPPED \n");
+        break;
+
+        case QCAMERA_SM_STATE_PREVIEW_READY:
+        snprintf(s, 128, " QCAMERA_SM_STATE_PREVIEW_READY \n");
+        break;
+
+        case QCAMERA_SM_STATE_PREVIEWING:
+        snprintf(s, 128, " QCAMERA_SM_STATE_PREVIEWING \n");
+        break;
+
+        case QCAMERA_SM_STATE_PREPARE_SNAPSHOT:
+        snprintf(s, 128, " QCAMERA_SM_STATE_PREPARE_SNAPSHOT \n");
+        break;
+
+        case QCAMERA_SM_STATE_PIC_TAKING:
+        snprintf(s, 128, " QCAMERA_SM_STATE_PIC_TAKING \n");
+        break;
+
+        case QCAMERA_SM_STATE_RECORDING:
+        snprintf(s, 128, " QCAMERA_SM_STATE_RECORDING \n");
+        break;
+
+        case QCAMERA_SM_STATE_VIDEO_PIC_TAKING:
+        snprintf(s, 128, " QCAMERA_SM_STATE_VIDEO_PIC_TAKING \n");
+        break;
+
+        case QCAMERA_SM_STATE_PREVIEW_PIC_TAKING:
+        snprintf(s, 128, " QCAMERA_SM_STATE_PREVIEW_PIC_TAKING \n");
+        break;
+    }
+    str += s;
+
+    return str;
+}
+
+}; // namespace qcamera
diff --git a/camera/QCamera2/HAL/QCameraStateMachine.h b/camera/QCamera2/HAL/QCameraStateMachine.h
new file mode 100644
index 0000000..4540790
--- /dev/null
+++ b/camera/QCamera2/HAL/QCameraStateMachine.h
@@ -0,0 +1,245 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCAMERA_STATEMACHINE_H__
+#define __QCAMERA_STATEMACHINE_H__
+
+#include <pthread.h>
+
+#include <cam_semaphore.h>
+extern "C" {
+#include <mm_camera_interface.h>
+}
+
+#include "QCameraQueue.h"
+#include "QCameraChannel.h"
+
+namespace qcamera {
+
+class QCamera2HardwareInterface;
+
+typedef enum {
+    /*******BEGIN OF: API EVT*********/
+    QCAMERA_SM_EVT_SET_PREVIEW_WINDOW = 1,   // set preview window
+    QCAMERA_SM_EVT_SET_CALLBACKS,            // set callbacks
+    QCAMERA_SM_EVT_ENABLE_MSG_TYPE,          // enable msg type
+    QCAMERA_SM_EVT_DISABLE_MSG_TYPE,         // disable msg type
+    QCAMERA_SM_EVT_MSG_TYPE_ENABLED,         // query if a certain msg type is enabled
+
+    QCAMERA_SM_EVT_SET_PARAMS,               // set parameters
+    QCAMERA_SM_EVT_GET_PARAMS,               // get parameters
+    QCAMERA_SM_EVT_PUT_PARAMS,               // put parameters, release param buf
+
+    QCAMERA_SM_EVT_START_PREVIEW,            // start preview (zsl, camera mode, camcorder mode)
+    QCAMERA_SM_EVT_START_NODISPLAY_PREVIEW,  // start no display preview (zsl, camera mode, camcorder mode)
+    QCAMERA_SM_EVT_STOP_PREVIEW,             // stop preview (zsl, camera mode, camcorder mode)
+    QCAMERA_SM_EVT_PREVIEW_ENABLED,          // query if preview is running
+
+    QCAMERA_SM_EVT_STORE_METADATA_IN_BUFS,   // request to store meta data in video buffers
+    QCAMERA_SM_EVT_START_RECORDING,          // start recording
+    QCAMERA_SM_EVT_STOP_RECORDING,           // stop recording
+    QCAMERA_SM_EVT_RECORDING_ENABLED,        // query if recording is running
+    QCAMERA_SM_EVT_RELEASE_RECORIDNG_FRAME,  // release recording frame
+
+    QCAMERA_SM_EVT_PREPARE_SNAPSHOT,         // prepare snapshot in case LED needs to be flashed
+    QCAMERA_SM_EVT_TAKE_PICTURE,             // take picture (zsl, regular capture, live snapshot)
+    QCAMERA_SM_EVT_CANCEL_PICTURE,           // cancel picture
+
+    QCAMERA_SM_EVT_START_AUTO_FOCUS,         // start auto focus
+    QCAMERA_SM_EVT_STOP_AUTO_FOCUS,          // stop auto focus
+    QCAMERA_SM_EVT_SEND_COMMAND,             // send command
+
+    QCAMERA_SM_EVT_RELEASE,                  // release camera resource
+    QCAMERA_SM_EVT_DUMP,                     // dump
+    QCAMERA_SM_EVT_REG_FACE_IMAGE,           // register a face image in imaging lib
+    /*******END OF: API EVT*********/
+
+    QCAMERA_SM_EVT_EVT_INTERNAL,             // internal evt notify
+    QCAMERA_SM_EVT_EVT_NOTIFY,               // evt notify from server
+    QCAMERA_SM_EVT_JPEG_EVT_NOTIFY,          // evt notify from jpeg
+    QCAMERA_SM_EVT_SNAPSHOT_DONE,            // internal evt that snapshot is done
+    QCAMERA_SM_EVT_THERMAL_NOTIFY,           // evt notify from thermal daemon
+    QCAMERA_SM_EVT_STOP_CAPTURE_CHANNEL,     // stop capture channel
+    QCAMERA_SM_EVT_RESTART_PERVIEW,          // internal preview restart
+    QCAMERA_SM_EVT_MAX
+} qcamera_sm_evt_enum_t;
+
+typedef enum {
+    QCAMERA_API_RESULT_TYPE_DEF,             // default type, no additional info
+    QCAMERA_API_RESULT_TYPE_ENABLE_FLAG,     // msg_enabled, preview_enabled, recording_enabled
+    QCAMERA_API_RESULT_TYPE_PARAMS,          // returned parameters in string
+    QCAMERA_API_RESULT_TYPE_HANDLE,          // returned handle in int
+    QCAMERA_API_RESULT_TYPE_MAX
+} qcamera_api_result_type_t;
+
+typedef struct {
+    int32_t status;                          // api call status
+    qcamera_sm_evt_enum_t request_api;       // api evt requested
+    qcamera_api_result_type_t result_type;   // result type
+    union {
+        int enabled;                          // result_type == QCAMERA_API_RESULT_TYPE_ENABLE_FLAG
+        char *params;                         // result_type == QCAMERA_API_RESULT_TYPE_PARAMS
+        int handle;                           // result_type == QCAMERA_API_RESULT_TYPE_HANDLE
+    };
+} qcamera_api_result_t;
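+
+// Illustrative sketch (not part of the HAL code): consumers are expected to
+// switch on result_type before reading the union, e.g.
+//
+//   void handleResult(const qcamera_api_result_t *res) {
+//       switch (res->result_type) {
+//       case QCAMERA_API_RESULT_TYPE_ENABLE_FLAG: /* use res->enabled */ break;
+//       case QCAMERA_API_RESULT_TYPE_PARAMS:      /* use res->params  */ break;
+//       case QCAMERA_API_RESULT_TYPE_HANDLE:      /* use res->handle  */ break;
+//       default: /* QCAMERA_API_RESULT_TYPE_DEF carries no payload */  break;
+//       }
+//   }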
+
+typedef struct api_result_list {
+   qcamera_api_result_t result;
+   struct api_result_list *next;
+}api_result_list;
+
+// definition for payload type of setting callback
+typedef struct {
+    camera_notify_callback notify_cb;
+    camera_data_callback data_cb;
+    camera_data_timestamp_callback data_cb_timestamp;
+    camera_request_memory get_memory;
+    void *user;
+} qcamera_sm_evt_setcb_payload_t;
+
+// definition for payload type of sending command
+typedef struct {
+    int32_t cmd;
+    int32_t arg1;
+    int32_t arg2;
+} qcamera_sm_evt_command_payload_t;
+
+// definition for payload type of sending command
+typedef struct {
+    void *img_ptr;
+    cam_pp_offline_src_config_t *config;
+} qcamera_sm_evt_reg_face_payload_t;
+
+typedef enum {
+    QCAMERA_INTERNAL_EVT_FOCUS_UPDATE,       // focus updating result
+    QCAMERA_INTERNAL_EVT_PREP_SNAPSHOT_DONE, // prepare snapshot done
+    QCAMERA_INTERNAL_EVT_FACE_DETECT_RESULT, // face detection result
+    QCAMERA_INTERNAL_EVT_HISTOGRAM_STATS,    // histogram
+    QCAMERA_INTERNAL_EVT_CROP_INFO,          // crop info
+    QCAMERA_INTERNAL_EVT_ASD_UPDATE,         // asd update result
+    QCAMERA_INTERNAL_EVT_READY_FOR_SNAPSHOT, // Ready for Prepare Snapshot
+    QCAMERA_INTERNAL_EVT_LED_MODE_OVERRIDE, // Led mode override
+    QCAMERA_INTERNAL_EVT_AWB_UPDATE,         // awb update result
+    QCAMERA_INTERNAL_EVT_AE_UPDATE,          // ae update result
+    QCAMERA_INTERNAL_EVT_FOCUS_POS_UPDATE,   // focus position update result
+    QCAMERA_INTERNAL_EVT_HDR_UPDATE,         // HDR scene update
+    QCAMERA_INTERNAL_EVT_RETRO_AEC_UNLOCK,   // retro burst AEC unlock event
+    QCAMERA_INTERNAL_EVT_ZSL_CAPTURE_DONE,   // ZSL capture done event
+    QCAMERA_INTERNAL_EVT_MAX
+} qcamera_internal_evt_type_t;
+
+typedef struct {
+    qcamera_internal_evt_type_t evt_type;
+    union {
+        cam_auto_focus_data_t focus_data;
+        cam_prep_snapshot_state_t prep_snapshot_state;
+        cam_face_detection_data_t faces_data;
+        cam_hist_stats_t stats_data;
+        cam_crop_data_t crop_data;
+        cam_auto_scene_t asd_data;
+        cam_flash_mode_t led_data;
+        cam_awb_params_t awb_data;
+        cam_3a_params_t ae_data;
+        cam_focus_pos_info_t focus_pos;
+        cam_asd_hdr_scene_data_t hdr_data;
+    };
+} qcamera_sm_internal_evt_payload_t;
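+
+// Illustrative sketch (not part of the HAL code): a producer fills evt_type and
+// the matching union member, then hands the payload to the state machine
+// (names below are hypothetical locals), e.g.
+//
+//   qcamera_sm_internal_evt_payload_t *payload =
+//       (qcamera_sm_internal_evt_payload_t *)
+//           malloc(sizeof(qcamera_sm_internal_evt_payload_t));
+//   if (payload != NULL) {
+//       memset(payload, 0, sizeof(*payload));
+//       payload->evt_type = QCAMERA_INTERNAL_EVT_CROP_INFO;
+//       payload->crop_data = crop;            // hypothetical cam_crop_data_t
+//       sm.procEvt(QCAMERA_SM_EVT_EVT_INTERNAL, payload);  // sm: owning state machine
+//   }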
+
+class QCameraStateMachine
+{
+public:
+    QCameraStateMachine(QCamera2HardwareInterface *ctrl);
+    virtual ~QCameraStateMachine();
+    int32_t procAPI(qcamera_sm_evt_enum_t evt, void *api_payload);
+    int32_t procEvt(qcamera_sm_evt_enum_t evt, void *evt_payload);
+
+    bool isPreviewRunning(); // check if preview is running
+    bool isPreviewReady(); // check if preview is ready
+    bool isCaptureRunning(); // check if image capture is running
+    bool isNonZSLCaptureRunning(); // check if image capture is running in non ZSL mode
+    String8 dump(); //returns the state information in a string
+    bool isPrepSnapStateRunning();
+    bool isRecording();
+    void releaseThread();
+
+private:
+    typedef enum {
+        QCAMERA_SM_STATE_PREVIEW_STOPPED,          // preview is stopped
+        QCAMERA_SM_STATE_PREVIEW_READY,            // preview started but preview window is not set yet
+        QCAMERA_SM_STATE_PREVIEWING,               // previewing
+        QCAMERA_SM_STATE_PREPARE_SNAPSHOT,         // prepare snapshot in case aec estimation is
+                                                   // needed for LED flash
+        QCAMERA_SM_STATE_PIC_TAKING,               // taking picture (preview stopped)
+        QCAMERA_SM_STATE_RECORDING,                // recording (preview running)
+        QCAMERA_SM_STATE_VIDEO_PIC_TAKING,         // taking live snapshot during recording (preview running)
+        QCAMERA_SM_STATE_PREVIEW_PIC_TAKING        // taking ZSL/live snapshot (recording stopped but preview running)
+    } qcamera_state_enum_t;
+
+    typedef enum
+    {
+        QCAMERA_SM_CMD_TYPE_API,                   // cmd from API
+        QCAMERA_SM_CMD_TYPE_EVT,                   // cmd from mm-camera-interface/mm-jpeg-interface event
+        QCAMERA_SM_CMD_TYPE_EXIT,                  // cmd for exiting statemachine cmdThread
+        QCAMERA_SM_CMD_TYPE_MAX
+    } qcamera_sm_cmd_type_t;
+
+    typedef struct {
+        qcamera_sm_cmd_type_t cmd;                  // cmd type (where it comes from)
+        qcamera_sm_evt_enum_t evt;                  // event type
+        void *evt_payload;                          // ptr to payload
+    } qcamera_sm_cmd_t;
+
+    int32_t stateMachine(qcamera_sm_evt_enum_t evt, void *payload);
+    int32_t procEvtPreviewStoppedState(qcamera_sm_evt_enum_t evt, void *payload);
+    int32_t procEvtPreviewReadyState(qcamera_sm_evt_enum_t evt, void *payload);
+    int32_t procEvtPreviewingState(qcamera_sm_evt_enum_t evt, void *payload);
+    int32_t procEvtPrepareSnapshotState(qcamera_sm_evt_enum_t evt, void *payload);
+    int32_t procEvtPicTakingState(qcamera_sm_evt_enum_t evt, void *payload);
+    int32_t procEvtRecordingState(qcamera_sm_evt_enum_t evt, void *payload);
+    int32_t procEvtVideoPicTakingState(qcamera_sm_evt_enum_t evt, void *payload);
+    int32_t procEvtPreviewPicTakingState(qcamera_sm_evt_enum_t evt, void *payload);
+
+    // main statemachine process routine
+    static void *smEvtProcRoutine(void *data);
+
+    int32_t applyDelayedMsgs();
+
+    QCamera2HardwareInterface *m_parent;  // ptr to HWI
+    qcamera_state_enum_t m_state;         // statemachine state
+    QCameraQueue api_queue;               // cmd queue for APIs
+    QCameraQueue evt_queue;               // cmd queue for evt from mm-camera-intf/mm-jpeg-intf
+    pthread_t cmd_pid;                    // cmd thread ID
+    cam_semaphore_t cmd_sem;              // semaphore for cmd thread
+    bool m_bDelayPreviewMsgs;             // Delay preview callback enable during ZSL snapshot
+    int32_t m_DelayedMsgs;
+};
+
+}; // namespace qcamera
+
+#endif /* __QCAMERA_STATEMACHINE_H__ */
diff --git a/camera/QCamera2/HAL/QCameraStream.cpp b/camera/QCamera2/HAL/QCameraStream.cpp
new file mode 100644
index 0000000..80e9831
--- /dev/null
+++ b/camera/QCamera2/HAL/QCameraStream.cpp
@@ -0,0 +1,2184 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#define LOG_TAG "QCameraStream"
+
+#include <utils/Errors.h>
+#include <QComOMXMetadata.h>
+#include "QCamera2HWI.h"
+#include "QCameraStream.h"
+
+#define CAMERA_MIN_ALLOCATED_BUFFERS     3
+
+namespace qcamera {
+
+/*===========================================================================
+ * FUNCTION   : get_bufs
+ *
+ * DESCRIPTION: static function entry to allocate stream buffers
+ *
+ * PARAMETERS :
+ *   @offset     : offset info of stream buffers
+ *   @num_bufs   : number of buffers allocated
+ *   @initial_reg_flag: flag to indicate if buffer needs to be registered
+ *                      at kernel initially
+ *   @bufs       : output of allocated buffers
+ *   @ops_tbl    : ptr to buf mapping/unmapping ops
+ *   @user_data  : user data ptr of ops_tbl
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::get_bufs(
+                     cam_frame_len_offset_t *offset,
+                     uint8_t *num_bufs,
+                     uint8_t **initial_reg_flag,
+                     mm_camera_buf_def_t **bufs,
+                     mm_camera_map_unmap_ops_tbl_t *ops_tbl,
+                     void *user_data)
+{
+    QCameraStream *stream = reinterpret_cast<QCameraStream *>(user_data);
+    if (!stream) {
+        ALOGE("getBufs invalid stream pointer");
+        return NO_MEMORY;
+    }
+
+    if (stream->mStreamInfo != NULL
+            && stream->mStreamInfo->streaming_mode == CAM_STREAMING_MODE_BATCH) {
+        // Batch mode: allocate batch buffers
+        return stream->allocateBatchBufs(offset, num_bufs,
+                initial_reg_flag, bufs, ops_tbl);
+    } else {
+        // Plane buffer: allocate plane buffers
+        return stream->getBufs(offset, num_bufs,
+                initial_reg_flag, bufs, ops_tbl);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : get_bufs_deffered
+ *
+ * DESCRIPTION: static function entry to allocate deferred stream buffers
+ *
+ * PARAMETERS :
+ *   @offset     : offset info of stream buffers
+ *   @num_bufs   : number of buffers allocated
+ *   @initial_reg_flag: flag to indicate if buffer needs to be registered
+ *                      at kernel initially
+ *   @bufs       : output of allocated buffers
+ *   @ops_tbl    : ptr to buf mapping/unmapping ops
+ *   @user_data  : user data ptr of ops_tbl
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::get_bufs_deffered(
+        cam_frame_len_offset_t * /* offset */,
+        uint8_t *num_bufs,
+        uint8_t **initial_reg_flag,
+        mm_camera_buf_def_t **bufs,
+        mm_camera_map_unmap_ops_tbl_t * /* ops_tbl */,
+        void *user_data)
+{
+    QCameraStream *stream = reinterpret_cast<QCameraStream *>(user_data);
+    if (!stream) {
+        ALOGE("getBufs invalid stream pointer");
+        return NO_MEMORY;
+    }
+
+    *initial_reg_flag   = stream->mRegFlags;
+    *num_bufs           = stream->mNumBufs;
+    *bufs               = stream->mBufDefs;
+    CDBG_HIGH("%s: stream type: %d, mRegFlags: 0x%x, numBufs: %d",
+            __func__, stream->getMyType(), stream->mRegFlags, stream->mNumBufs);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : put_bufs
+ *
+ * DESCRIPTION: static function entry to deallocate stream buffers
+ *
+ * PARAMETERS :
+ *   @ops_tbl    : ptr to buf mapping/unmapping ops
+ *   @user_data  : user data ptr of ops_tbl
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::put_bufs(
+        mm_camera_map_unmap_ops_tbl_t *ops_tbl,
+        void *user_data)
+{
+    QCameraStream *stream = reinterpret_cast<QCameraStream *>(user_data);
+    if (!stream) {
+        ALOGE("putBufs invalid stream pointer");
+        return NO_MEMORY;
+    }
+
+    if (stream->mStreamInfo != NULL
+            && stream->mStreamInfo->streaming_mode == CAM_STREAMING_MODE_BATCH) {
+        // Batch mode: release batch buffers
+        return stream->releaseBatchBufs(ops_tbl);
+    } else {
+        // Plane buffer: release plane buffers
+        return stream->putBufs(ops_tbl);
+    }
+
+}
+
+/*===========================================================================
+ * FUNCTION   : put_bufs_deffered
+ *
+ * DESCRIPTION: static function entry to deallocate deferred stream buffers
+ *
+ * PARAMETERS :
+ *   @ops_tbl    : ptr to buf mapping/unmapping ops
+ *   @user_data  : user data ptr of ops_tbl
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::put_bufs_deffered(
+        mm_camera_map_unmap_ops_tbl_t * /*ops_tbl */,
+        void * /*user_data*/ )
+{
+    // No op
+    // Used for handling buffers with deferred allocation. They are freed separately.
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : invalidate_buf
+ *
+ * DESCRIPTION: static function entry to invalidate a specific stream buffer
+ *
+ * PARAMETERS :
+ *   @index      : index of the stream buffer to invalidate
+ *   @user_data  : user data ptr of ops_tbl
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::invalidate_buf(uint32_t index, void *user_data)
+{
+    QCameraStream *stream = reinterpret_cast<QCameraStream *>(user_data);
+    if (!stream) {
+        ALOGE("invalid stream pointer");
+        return NO_MEMORY;
+    }
+
+    if (stream->mStreamInfo->is_secure == SECURE){
+        return 0;
+    }
+
+    if (stream->mStreamInfo->streaming_mode == CAM_STREAMING_MODE_BATCH) {
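+        // A batch (user) buffer aggregates multiple stream buffers; invalidate
+        // each contained buffer index individually.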
+        for (int i = 0; i < stream->mBufDefs[index].user_buf.bufs_used; i++) {
+            uint32_t buf_idx = stream->mBufDefs[index].user_buf.buf_idx[i];
+            stream->invalidateBuf(buf_idx);
+        }
+    } else {
+        return stream->invalidateBuf(index);
+    }
+
+    return 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : clean_invalidate_buf
+ *
+ * DESCRIPTION: static function entry to clean invalidate a specific stream buffer
+ *
+ * PARAMETERS :
+ *   @index      : index of the stream buffer to clean invalidate
+ *   @user_data  : user data ptr of ops_tbl
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::clean_invalidate_buf(uint32_t index, void *user_data)
+{
+    QCameraStream *stream = reinterpret_cast<QCameraStream *>(user_data);
+    if (!stream) {
+        ALOGE("invalid stream pointer");
+        return NO_MEMORY;
+    }
+
+    if (stream->mStreamInfo->is_secure == SECURE){
+        return 0;
+    }
+
+    if (stream->mStreamInfo->streaming_mode == CAM_STREAMING_MODE_BATCH) {
+        for (int i = 0; i < stream->mBufDefs[index].user_buf.bufs_used; i++) {
+            uint32_t buf_idx = stream->mBufDefs[index].user_buf.buf_idx[i];
+            stream->cleanInvalidateBuf(buf_idx);
+        }
+    } else {
+        return stream->cleanInvalidateBuf(index);
+    }
+
+    return 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : QCameraStream
+ *
+ * DESCRIPTION: constructor of QCameraStream
+ *
+ * PARAMETERS :
+ *   @allocator  : memory allocator obj
+ *   @camHandle  : camera handle
+ *   @chId       : channel handle
+ *   @camOps     : ptr to camera ops table
+ *   @paddingInfo: ptr to padding info
+ *   @deffered   : deferred stream
+ *   @online_rotation: rotation applied online
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraStream::QCameraStream(QCameraAllocator &allocator,
+        uint32_t camHandle, uint32_t chId,
+        mm_camera_ops_t *camOps, cam_padding_info_t *paddingInfo,
+        bool deffered, cam_rotation_t online_rotation):
+        mDumpFrame(0),
+        mDumpMetaFrame(0),
+        mDumpSkipCnt(0),
+        mCamHandle(camHandle),
+        mChannelHandle(chId),
+        mHandle(0),
+        mCamOps(camOps),
+        mStreamInfo(NULL),
+        mNumBufs(0),
+        mNumPlaneBufs(0),
+        mNumBufsNeedAlloc(0),
+        mDataCB(NULL),
+        mUserData(NULL),
+        mDataQ(releaseFrameData, this),
+        mStreamInfoBuf(NULL),
+        mMiscBuf(NULL),
+        mStreamBufs(NULL),
+        mStreamBatchBufs(NULL),
+        mAllocator(allocator),
+        mBufDefs(NULL),
+        mPlaneBufDefs(NULL),
+        mOnlineRotation(online_rotation),
+        mStreamBufsAcquired(false),
+        m_bActive(false),
+        mDynBufAlloc(false),
+        mBufAllocPid(0),
+        mDefferedAllocation(deffered),
+        wait_for_cond(false)
+{
+    mMemVtbl.user_data = this;
+    if ( !deffered ) {
+        mMemVtbl.get_bufs = get_bufs;
+        mMemVtbl.put_bufs = put_bufs;
+    } else {
+        mMemVtbl.get_bufs = get_bufs_deffered;
+        mMemVtbl.put_bufs = put_bufs_deffered;
+    }
+    mMemVtbl.invalidate_buf = invalidate_buf;
+    mMemVtbl.clean_invalidate_buf = clean_invalidate_buf;
+    memset(&mFrameLenOffset, 0, sizeof(mFrameLenOffset));
+    memcpy(&mPaddingInfo, paddingInfo, sizeof(cam_padding_info_t));
+    memset(&mCropInfo, 0, sizeof(cam_rect_t));
+    memset(&m_MemOpsTbl, 0, sizeof(mm_camera_map_unmap_ops_tbl_t));
+    memset(&m_OutputCrop, 0, sizeof(cam_stream_parm_buffer_t));
+    memset(&m_ImgProp, 0, sizeof(cam_stream_parm_buffer_t));
+    pthread_mutex_init(&mCropLock, NULL);
+    pthread_mutex_init(&mParameterLock, NULL);
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCameraStream
+ *
+ * DESCRIPTION: destructor of QCameraStream
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraStream::~QCameraStream()
+{
+    pthread_mutex_destroy(&mCropLock);
+    pthread_mutex_destroy(&mParameterLock);
+
+    if (mDefferedAllocation) {
+        mStreamBufsAcquired = false;
+        releaseBuffs();
+    }
+
+    unmapStreamInfoBuf();
+    releaseStreamInfoBuf();
+
+    if (mMiscBuf) {
+        unMapBuf(mMiscBuf, CAM_MAPPING_BUF_TYPE_MISC_BUF, NULL);
+        releaseMiscBuf();
+    }
+
+    // delete stream
+    if (mHandle > 0) {
+        mCamOps->delete_stream(mCamHandle, mChannelHandle, mHandle);
+        mHandle = 0;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : unmapStreamInfoBuf
+ *
+ * DESCRIPTION: Unmap stream info buffer
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::unmapStreamInfoBuf()
+{
+    int rc = NO_ERROR;
+
+    if (mStreamInfoBuf != NULL) {
+        rc = mCamOps->unmap_stream_buf(mCamHandle,
+            mChannelHandle,
+            mHandle,
+            CAM_MAPPING_BUF_TYPE_STREAM_INFO,
+            0,
+            -1);
+
+        if (rc < 0) {
+            ALOGE("Failed to unmap stream info buffer");
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseMiscBuf
+ *
+ * DESCRIPTION: Release misc buffers
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::releaseMiscBuf()
+{
+    int rc = NO_ERROR;
+
+    if (mMiscBuf != NULL) {
+        mMiscBuf->deallocate();
+        delete mMiscBuf;
+        mMiscBuf = NULL;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseStreamInfoBuf
+ *
+ * DESCRIPTION: Release stream info buffer
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::releaseStreamInfoBuf()
+{
+    int rc = NO_ERROR;
+
+    if (mStreamInfoBuf != NULL) {
+        mStreamInfoBuf->deallocate();
+        delete mStreamInfoBuf;
+        mStreamInfoBuf = NULL;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : deleteStream
+ *
+ * DESCRIPTION: Deletes a camera stream
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCameraStream::deleteStream()
+{
+    if (mHandle > 0) {
+        acquireStreamBufs();
+        releaseBuffs();
+        unmapStreamInfoBuf();
+        mCamOps->delete_stream(mCamHandle, mChannelHandle, mHandle);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : unMapBuf
+ *
+ * DESCRIPTION: unmaps buffers
+ *
+ * PARAMETERS :
+ *   @heapBuf      : heap buffer handler
+ *   @bufType      : buffer type
+ *   @ops_tbl    : ptr to buf mapping/unmapping ops
+ *
+ * RETURN     : int32_t type of status
+ *              non-zero failure code
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::unMapBuf(QCameraMemory *Buf,
+        cam_mapping_buf_type bufType, mm_camera_map_unmap_ops_tbl_t *ops_tbl)
+{
+    int32_t rc = NO_ERROR;
+    uint8_t cnt;
+    ssize_t bufSize = BAD_INDEX;
+    uint32_t i;
+
+    cnt = Buf->getCnt();
+    for (i = 0; i < cnt; i++) {
+        bufSize = Buf->getSize(i);
+        if (BAD_INDEX != bufSize) {
+            if (ops_tbl == NULL ) {
+                rc = mCamOps->unmap_stream_buf(mCamHandle, mChannelHandle, mHandle,
+                        bufType, i, -1);
+            } else {
+                rc = ops_tbl->unmap_ops(i, -1, bufType, ops_tbl->userdata);
+            }
+            if (rc < 0) {
+                ALOGE("Failed to unmap buffer");
+                break;
+            }
+        } else {
+            ALOGE("Failed to retrieve buffer size (bad index)");
+            rc = BAD_INDEX;
+            break;
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mapBuf
+ *
+ * DESCRIPTION: maps buffers
+ *
+ * PARAMETERS :
+ *   @heapBuf      : heap buffer handler
+ *   @bufType      : buffer type
+ *   @ops_tbl    : ptr to buf mapping/unmapping ops
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::mapBuf(QCameraMemory *Buf,
+        cam_mapping_buf_type bufType, mm_camera_map_unmap_ops_tbl_t *ops_tbl)
+{
+    int32_t rc = NO_ERROR;
+    uint8_t cnt;
+    ssize_t bufSize = BAD_INDEX;
+    int32_t i = 0;
+
+    cnt = Buf->getCnt();
+    for (i = 0; i < cnt; i++) {
+        bufSize = Buf->getSize((uint32_t)i);
+        if (BAD_INDEX != bufSize) {
+            if (ops_tbl == NULL) {
+                rc = mCamOps->map_stream_buf(mCamHandle, mChannelHandle, mHandle,
+                        (uint8_t)bufType, (uint32_t)i, -1,
+                        Buf->getFd((uint32_t)i), (uint32_t)bufSize);
+            } else {
+                rc = ops_tbl->map_ops((uint32_t)i, -1, Buf->getFd((uint32_t)i),
+                        (uint32_t)bufSize, bufType, ops_tbl->userdata);
+            }
+            if (rc < 0) {
+                ALOGE("Failed to map buffer");
+                goto err1;
+            }
+        } else {
+            ALOGE("Failed to retrieve buffer size (bad index)");
+            rc = BAD_INDEX;
+            goto err1;
+        }
+    }
+
+    return rc;
+err1:
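+    // Error rollback: unmap every buffer that was successfully mapped before
+    // the failure, walking the indices back down in reverse order.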
+    i -= 1;
+    for (; i >= 0; i--) {
+        bufSize = Buf->getSize((uint32_t)i);
+        if (BAD_INDEX != bufSize) {
+            if (ops_tbl == NULL) {
+                rc = mCamOps->unmap_stream_buf(mCamHandle, mChannelHandle, mHandle,
+                        (uint8_t)bufType, (uint32_t)i, -1);
+            } else {
+                rc = ops_tbl->unmap_ops((uint32_t)i, -1, bufType, ops_tbl->userdata);
+            }
+            if (rc < 0) {
+                ALOGE("Failed to unmap buffer");
+            }
+        } else {
+            ALOGE("Failed to retrieve buffer size (bad index)");
+            rc = BAD_INDEX;
+        }
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : init
+ *
+ * DESCRIPTION: initialize stream obj
+ *
+ * PARAMETERS :
+ *   @streamInfoBuf: ptr to buf that contains stream info
+ *   @miscBuf      : ptr to buf that contains misc bufs
+ *   @minNumBuffers: minimum number of stream buffers to be allocated
+ *   @stream_cb    : stream data notify callback. Can be NULL if not needed
+ *   @userdata     : user data ptr
+ *   @bDynallocBuf : flag to indicate if buffer allocation can be in 2 steps
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::init(QCameraHeapMemory *streamInfoBuf,
+        QCameraHeapMemory *miscBuf,
+        uint8_t minNumBuffers,
+        stream_cb_routine stream_cb,
+        void *userdata,
+        bool bDynallocBuf)
+{
+    int32_t rc = OK;
+
+    mHandle = mCamOps->add_stream(mCamHandle, mChannelHandle);
+    if (!mHandle) {
+        ALOGE("add_stream failed");
+        rc = UNKNOWN_ERROR;
+        if (streamInfoBuf != NULL) {
+            streamInfoBuf->deallocate();
+            delete streamInfoBuf;
+            streamInfoBuf = NULL;
+        }
+        goto done;
+    }
+
+    // assign and map stream info memory
+    mStreamInfoBuf = streamInfoBuf;
+    mStreamInfo = reinterpret_cast<cam_stream_info_t *>(mStreamInfoBuf->getPtr(0));
+    mNumBufs = minNumBuffers;
+
+    rc = mapBuf(mStreamInfoBuf, CAM_MAPPING_BUF_TYPE_STREAM_INFO, NULL);
+    if (rc < 0) {
+        ALOGE("Failed to map stream info buffer");
+        releaseStreamInfoBuf();
+        mStreamInfo = 0;
+        goto err1;
+    }
+
+    mMiscBuf = miscBuf;
+    if (miscBuf) {
+        rc = mapBuf(mMiscBuf, CAM_MAPPING_BUF_TYPE_MISC_BUF, NULL);
+        if (rc < 0) {
+            ALOGE("Failed to map miscellaneous buffer");
+            releaseMiscBuf();
+            goto err1;
+        }
+    }
+
+    // Calculate buffer size for deferred allocation
+    if (mDefferedAllocation) {
+        rc = calcOffset(mStreamInfo);
+        if (rc < 0) {
+            ALOGE("%s : Failed to calculate stream offset", __func__);
+            goto err1;
+        }
+    } else {
+        rc = configStream();
+        if (rc < 0) {
+            ALOGE("%s : Failed to config stream ", __func__);
+            goto err1;
+        }
+    }
+
+    mDataCB = stream_cb;
+    mUserData = userdata;
+    mDynBufAlloc = bDynallocBuf;
+    return 0;
+
+err1:
+    mCamOps->delete_stream(mCamHandle, mChannelHandle, mHandle);
+    mHandle = 0;
+    mNumBufs = 0;
+done:
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : calcOffset
+ *
+ * DESCRIPTION: calculate frame offset based on format and padding information
+ *
+ * PARAMETERS :
+ *   @streamInfo  : stream information
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t QCameraStream::calcOffset(cam_stream_info_t *streamInfo)
+{
+    int32_t rc = 0;
+
+    cam_dimension_t dim = streamInfo->dim;
+    if (streamInfo->pp_config.feature_mask & CAM_QCOM_FEATURE_ROTATION &&
+            streamInfo->stream_type != CAM_STREAM_TYPE_VIDEO) {
+        if (streamInfo->pp_config.rotation == ROTATE_90 ||
+                streamInfo->pp_config.rotation == ROTATE_270) {
+            // rotated by 90 or 270, need to switch width and height
+            dim.width = streamInfo->dim.height;
+            dim.height = streamInfo->dim.width;
+        }
+    }
+
+    switch (streamInfo->stream_type) {
+    case CAM_STREAM_TYPE_PREVIEW:
+        rc = mm_stream_calc_offset_preview(streamInfo->fmt,
+                &dim,
+                &streamInfo->buf_planes);
+        break;
+    case CAM_STREAM_TYPE_POSTVIEW:
+        rc = mm_stream_calc_offset_post_view(streamInfo->fmt,
+                &dim,
+                &streamInfo->buf_planes);
+        break;
+    case CAM_STREAM_TYPE_SNAPSHOT:
+        rc = mm_stream_calc_offset_snapshot(streamInfo->fmt,
+                &dim,
+                &mPaddingInfo,
+                &streamInfo->buf_planes);
+        break;
+    case CAM_STREAM_TYPE_OFFLINE_PROC:
+        rc = mm_stream_calc_offset_postproc(streamInfo,
+                &mPaddingInfo,
+                &streamInfo->buf_planes);
+        break;
+    case CAM_STREAM_TYPE_VIDEO:
+        rc = mm_stream_calc_offset_video(&dim,
+                &streamInfo->buf_planes);
+        break;
+    case CAM_STREAM_TYPE_RAW:
+        rc = mm_stream_calc_offset_raw(streamInfo->fmt,
+                &dim,
+                &mPaddingInfo,
+                &streamInfo->buf_planes);
+        break;
+    case CAM_STREAM_TYPE_ANALYSIS:
+        rc = mm_stream_calc_offset_analysis(streamInfo->fmt,
+                &dim,
+                &mPaddingInfo,
+                &streamInfo->buf_planes);
+        break;
+    case CAM_STREAM_TYPE_METADATA:
+        rc = mm_stream_calc_offset_metadata(&dim,
+                &mPaddingInfo,
+                &streamInfo->buf_planes);
+        break;
+    default:
+        ALOGE("%s: not supported for stream type %d",
+                __func__, streamInfo->stream_type);
+        rc = -1;
+        break;
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : start
+ *
+ * DESCRIPTION: start stream. Will start main stream thread to handle stream
+ *              related ops.
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::start()
+{
+    int32_t rc = 0;
+    mDataQ.init();
+    rc = mProcTh.launch(dataProcRoutine, this);
+    if (rc == NO_ERROR) {
+        m_bActive = true;
+    }
+    pthread_mutex_init(&m_lock, NULL);
+    pthread_cond_init(&m_cond, NULL);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : stop
+ *
+ * DESCRIPTION: stop stream. Will stop main stream thread
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::stop()
+{
+    int32_t rc = 0;
+    m_bActive = false;
+    rc = mProcTh.exit();
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : syncRuntimeParams
+ *
+ * DESCRIPTION: query and sync runtime parameters like output crop
+ *              buffer info etc.
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::syncRuntimeParams()
+{
+    int32_t ret = NO_ERROR;
+
+    memset(&m_OutputCrop, 0, sizeof(cam_stream_parm_buffer_t));
+    m_OutputCrop.type = CAM_STREAM_PARAM_TYPE_GET_OUTPUT_CROP;
+
+    ret = getParameter(m_OutputCrop);
+    if (ret != NO_ERROR) {
+        ALOGE("%s: stream getParameter for output crop failed", __func__);
+        return ret;
+    }
+
+    memset(&m_ImgProp, 0, sizeof(cam_stream_parm_buffer_t));
+    m_ImgProp.type = CAM_STREAM_PARAM_TYPE_GET_IMG_PROP;
+
+    ret = getParameter(m_ImgProp);
+    if (ret != NO_ERROR) {
+        ALOGE("%s: stream getParameter for image prop failed", __func__);
+        return ret;
+    }
+
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : processZoomDone
+ *
+ * DESCRIPTION: process zoom done event
+ *
+ * PARAMETERS :
+ *   @previewWindow : preview window ops table to set preview crop window
+ *   @crop_info     : crop info
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::processZoomDone(preview_stream_ops_t *previewWindow,
+                                       cam_crop_data_t &crop_info)
+{
+    int32_t rc = 0;
+
+    if (!m_bActive) {
+        ALOGV("%s : Stream not active", __func__);
+        return NO_ERROR;
+    }
+
+    // get stream param for crop info
+    for (int i = 0; i < crop_info.num_of_streams; i++) {
+        if (crop_info.crop_info[i].stream_id == mStreamInfo->stream_svr_id) {
+            pthread_mutex_lock(&mCropLock);
+            mCropInfo = crop_info.crop_info[i].crop;
+            pthread_mutex_unlock(&mCropLock);
+
+            // update preview window crop if it's preview/postview stream
+            if ( (previewWindow != NULL) &&
+                 (mStreamInfo->stream_type == CAM_STREAM_TYPE_PREVIEW ||
+                  mStreamInfo->stream_type == CAM_STREAM_TYPE_POSTVIEW) ) {
+                rc = previewWindow->set_crop(previewWindow,
+                                             mCropInfo.left,
+                                             mCropInfo.top,
+                                             mCropInfo.width,
+                                             mCropInfo.height);
+            }
+            break;
+        }
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : processDataNotify
+ *
+ * DESCRIPTION: process stream data notify
+ *
+ * PARAMETERS :
+ *   @frame   : stream frame received
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::processDataNotify(mm_camera_super_buf_t *frame)
+{
+    CDBG("%s:\n", __func__);
+    if (mDataQ.enqueue((void *)frame)) {
+        return mProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE);
+    } else {
+        CDBG_HIGH("%s: Stream thread is not active, no ops here", __func__);
+        bufDone(frame->bufs[0]->buf_idx);
+        free(frame);
+        return NO_ERROR;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : dataNotifyCB
+ *
+ * DESCRIPTION: callback for data notify. This function is registered with
+ *              mm-camera-interface to handle data notify
+ *
+ * PARAMETERS :
+ *   @recvd_frame   : stream frame received
+ *   @userdata      : user data ptr
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCameraStream::dataNotifyCB(mm_camera_super_buf_t *recvd_frame,
+                                 void *userdata)
+{
+    CDBG("%s:\n", __func__);
+    QCameraStream* stream = (QCameraStream *)userdata;
+    if (stream == NULL ||
+        recvd_frame == NULL ||
+        recvd_frame->bufs[0] == NULL ||
+        recvd_frame->bufs[0]->stream_id != stream->getMyHandle()) {
+        ALOGE("%s: Not a valid stream to handle buf", __func__);
+        return;
+    }
+
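+    // Copy the super buffer so it can be queued and processed asynchronously on
+    // the stream's data processing thread; the callback argument itself is not
+    // retained beyond this call.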
+    mm_camera_super_buf_t *frame =
+        (mm_camera_super_buf_t *)malloc(sizeof(mm_camera_super_buf_t));
+    if (frame == NULL) {
+        ALOGE("%s: No mem for mm_camera_super_buf_t", __func__);
+        stream->bufDone(recvd_frame->bufs[0]->buf_idx);
+        return;
+    }
+    *frame = *recvd_frame;
+    stream->processDataNotify(frame);
+    return;
+}
+
+/*===========================================================================
+ * FUNCTION   : dataProcRoutine
+ *
+ * DESCRIPTION: function to process data in the main stream thread
+ *
+ * PARAMETERS :
+ *   @data    : user data ptr
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void *QCameraStream::dataProcRoutine(void *data)
+{
+    int running = 1;
+    int ret;
+    QCameraStream *pme = (QCameraStream *)data;
+    QCameraCmdThread *cmdThread = &pme->mProcTh;
+    cmdThread->setName("CAM_strmDatProc");
+
+    CDBG("%s: E", __func__);
+    do {
+        do {
+            ret = cam_sem_wait(&cmdThread->cmd_sem);
+            if (ret != 0 && errno != EINVAL) {
+                ALOGE("%s: cam_sem_wait error (%s)",
+                      __func__, strerror(errno));
+                return NULL;
+            }
+        } while (ret != 0);
+
+        // we got notified about new cmd avail in cmd queue
+        camera_cmd_type_t cmd = cmdThread->getCmd();
+        switch (cmd) {
+        case CAMERA_CMD_TYPE_DO_NEXT_JOB:
+            {
+                CDBG_HIGH("%s: Do next job", __func__);
+                mm_camera_super_buf_t *frame =
+                    (mm_camera_super_buf_t *)pme->mDataQ.dequeue();
+                if (NULL != frame) {
+                    if (pme->mDataCB != NULL) {
+                        pme->mDataCB(frame, pme, pme->mUserData);
+                    } else {
+                        // no data cb routine, return buf here
+                        pme->bufDone(frame->bufs[0]->buf_idx);
+                        free(frame);
+                    }
+                }
+            }
+            break;
+        case CAMERA_CMD_TYPE_EXIT:
+            CDBG_HIGH("%s: Exit", __func__);
+            /* flush data buf queue */
+            pme->mDataQ.flush();
+            running = 0;
+            break;
+        default:
+            break;
+        }
+    } while (running);
+    CDBG_HIGH("%s: X", __func__);
+    return NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : bufDone
+ *
+ * DESCRIPTION: return stream buffer to kernel
+ *
+ * PARAMETERS :
+ *   @index   : index of buffer to be returned
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::bufDone(uint32_t index)
+{
+    int32_t rc = NO_ERROR;
+
+    if (index >= mNumBufs || mBufDefs == NULL)
+        return BAD_INDEX;
+
+    rc = mCamOps->qbuf(mCamHandle, mChannelHandle, &mBufDefs[index]);
+    if (rc < 0)
+        return rc;
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : bufDone
+ *
+ * DESCRIPTION: return stream buffer to kernel
+ *
+ * PARAMETERS :
+ *   @opaque    : stream frame/metadata buf to be returned
+ *   @isMetaData: flag indicating if the opaque pointer is a metadata buf or the real frame ptr
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::bufDone(const void *opaque, bool isMetaData)
+{
+    int32_t rc = NO_ERROR;
+    int index;
+
+    if (mStreamInfo != NULL
+            && mStreamInfo->streaming_mode == CAM_STREAMING_MODE_BATCH) {
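+        // Batch (video metadata) path: look up the container buffer and release
+        // its encoder metadata native handle before the buffer is requeued below.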
+        index = mStreamBatchBufs->getMatchBufIndex(opaque, TRUE);
+        if (index == -1 || index >= mNumBufs || mBufDefs == NULL) {
+            ALOGE("%s: Cannot find buf for opaque data = %p", __func__, opaque);
+            return BAD_INDEX;
+        }
+        camera_memory_t *video_mem = mStreamBatchBufs->getMemory(index, true);
+        if (video_mem != NULL) {
+            struct encoder_media_buffer_type * packet =
+                    (struct encoder_media_buffer_type *)video_mem->data;
+            native_handle_t *nh = const_cast<native_handle_t *>(packet->meta_handle);
+            if (NULL != nh) {
+               if (native_handle_delete(nh)) {
+                   ALOGE("%s: Unable to delete native handle", __func__);
+               }
+            } else {
+               ALOGE("%s : native handle not available", __func__);
+            }
+        }
+    } else {
+        index = mStreamBufs->getMatchBufIndex(opaque, isMetaData);
+        if (index == -1 || index >= mNumBufs || mBufDefs == NULL) {
+            ALOGE("%s: Cannot find buf for opaque data = %p", __func__, opaque);
+            return BAD_INDEX;
+        }
+        CDBG_HIGH("%s: Buffer Index = %d, Frame Idx = %d", __func__, index,
+                mBufDefs[index].frame_idx);
+    }
+    rc = bufDone((uint32_t)index);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : getNumQueuedBuf
+ *
+ * DESCRIPTION: return queued buffer count
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : queued buffer count
+ *==========================================================================*/
+int32_t QCameraStream::getNumQueuedBuf()
+{
+    int32_t rc = -1;
+    if (mHandle > 0) {
+        rc = mCamOps->get_queued_buf_count(mCamHandle, mChannelHandle, mHandle);
+    }
+    if (rc == -1) {
+        ALOGE("%s: stream is not in active state. Invalid operation", __func__);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : getBufs
+ *
+ * DESCRIPTION: allocate stream buffers
+ *
+ * PARAMETERS :
+ *   @offset     : offset info of stream buffers
+ *   @num_bufs   : number of buffers allocated
+ *   @initial_reg_flag: flag to indicate if buffer needs to be registered
+ *                      at kernel initially
+ *   @bufs       : output of allocated buffers
+ *   @ops_tbl    : ptr to buf mapping/unmapping ops
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::getBufs(cam_frame_len_offset_t *offset,
+        uint8_t *num_bufs,
+        uint8_t **initial_reg_flag,
+        mm_camera_buf_def_t **bufs,
+        mm_camera_map_unmap_ops_tbl_t *ops_tbl)
+{
+    int rc = NO_ERROR;
+    uint8_t *regFlags;
+
+    if (!ops_tbl) {
+        ALOGE("%s: ops_tbl is NULL", __func__);
+        return INVALID_OPERATION;
+    }
+
+    mFrameLenOffset = *offset;
+
+    uint8_t numBufAlloc = mNumBufs;
+    mNumBufsNeedAlloc = 0;
+    if (mDynBufAlloc) {
+        numBufAlloc = CAMERA_MIN_ALLOCATED_BUFFERS;
+        if (numBufAlloc > mNumBufs) {
+            mDynBufAlloc = false;
+            numBufAlloc = mNumBufs;
+        } else {
+            mNumBufsNeedAlloc = (uint8_t)(mNumBufs - numBufAlloc);
+        }
+    }
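+    // Two-step allocation: when dynamic buffer allocation is enabled, only
+    // CAMERA_MIN_ALLOCATED_BUFFERS are allocated and mapped up front; the
+    // remaining mNumBufsNeedAlloc buffers are allocated later by the
+    // BufAllocRoutine thread started further down in this function.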
+
+    //Allocate stream buffer
+    mStreamBufs = mAllocator.allocateStreamBuf(mStreamInfo->stream_type,
+            mFrameLenOffset.frame_len, mFrameLenOffset.mp[0].stride,
+            mFrameLenOffset.mp[0].scanline, numBufAlloc);
+    if (!mStreamBufs) {
+        ALOGE("%s: Failed to allocate stream buffers", __func__);
+        return NO_MEMORY;
+    }
+
+    mNumBufs = (uint8_t)(numBufAlloc + mNumBufsNeedAlloc);
+
+    for (uint32_t i = 0; i < numBufAlloc; i++) {
+        ssize_t bufSize = mStreamBufs->getSize(i);
+        if (BAD_INDEX != bufSize) {
+            rc = ops_tbl->map_ops(i, -1, mStreamBufs->getFd(i),
+                    (uint32_t)bufSize, CAM_MAPPING_BUF_TYPE_STREAM_BUF, ops_tbl->userdata);
+            if (rc < 0) {
+                ALOGE("%s: map_stream_buf failed: %d", __func__, rc);
+                for (uint32_t j = 0; j < i; j++) {
+                    ops_tbl->unmap_ops(j, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF, ops_tbl->userdata);
+                }
+                mStreamBufs->deallocate();
+                delete mStreamBufs;
+                mStreamBufs = NULL;
+                return INVALID_OPERATION;
+            }
+        } else {
+            ALOGE("Failed to retrieve buffer size (bad index)");
+            return INVALID_OPERATION;
+        }
+    }
+
+    //regFlags array is allocated by us, but consumed and freed by mm-camera-interface
+    regFlags = (uint8_t *)malloc(sizeof(uint8_t) * mNumBufs);
+    if (!regFlags) {
+        ALOGE("%s: Out of memory", __func__);
+        for (uint32_t i = 0; i < numBufAlloc; i++) {
+            ops_tbl->unmap_ops(i, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF, ops_tbl->userdata);
+        }
+        mStreamBufs->deallocate();
+        delete mStreamBufs;
+        mStreamBufs = NULL;
+        return NO_MEMORY;
+    }
+    memset(regFlags, 0, sizeof(uint8_t) * mNumBufs);
+
+    mBufDefs = (mm_camera_buf_def_t *)malloc(mNumBufs * sizeof(mm_camera_buf_def_t));
+    if (mBufDefs == NULL) {
+        ALOGE("%s: getRegFlags failed %d", __func__, rc);
+        for (uint32_t i = 0; i < numBufAlloc; i++) {
+            ops_tbl->unmap_ops(i, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF, ops_tbl->userdata);
+        }
+        mStreamBufs->deallocate();
+        delete mStreamBufs;
+        mStreamBufs = NULL;
+        free(regFlags);
+        regFlags = NULL;
+        return INVALID_OPERATION;
+    }
+    memset(mBufDefs, 0, mNumBufs * sizeof(mm_camera_buf_def_t));
+    for (uint32_t i = 0; i < numBufAlloc; i++) {
+        mStreamBufs->getBufDef(mFrameLenOffset, mBufDefs[i], i);
+    }
+
+    rc = mStreamBufs->getRegFlags(regFlags);
+    if (rc < 0) {
+        ALOGE("%s: getRegFlags failed %d", __func__, rc);
+        for (uint32_t i = 0; i < numBufAlloc; i++) {
+            ops_tbl->unmap_ops(i, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF, ops_tbl->userdata);
+        }
+        mStreamBufs->deallocate();
+        delete mStreamBufs;
+        mStreamBufs = NULL;
+        free(mBufDefs);
+        mBufDefs = NULL;
+        free(regFlags);
+        regFlags = NULL;
+        return INVALID_OPERATION;
+    }
+
+    *num_bufs = mNumBufs;
+    *initial_reg_flag = regFlags;
+    *bufs = mBufDefs;
+    CDBG_HIGH("%s: stream type: %d, mRegFlags: 0x%x, numBufs: %d",
+            __func__, mStreamInfo->stream_type, regFlags, mBufDefs);
+
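+    // Arm wait_for_cond before spawning the allocation thread; BufAllocRoutine
+    // blocks in cond_wait() until cond_signal() clears the flag.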
+    if (mNumBufsNeedAlloc > 0) {
+        pthread_mutex_lock(&m_lock);
+        wait_for_cond = TRUE;
+        pthread_mutex_unlock(&m_lock);
+        CDBG_HIGH("%s: Still need to allocate %d buffers",
+              __func__, mNumBufsNeedAlloc);
+        // remember memops table
+        m_MemOpsTbl = *ops_tbl;
+        // start another thread to allocate the rest of buffers
+        pthread_create(&mBufAllocPid,
+                       NULL,
+                       BufAllocRoutine,
+                       this);
+        pthread_setname_np(mBufAllocPid, "CAM_strmBufAlloc");
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : allocateBuffers
+ *
+ * DESCRIPTION: allocate stream buffers
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::allocateBuffers()
+{
+    int rc = NO_ERROR;
+
+    mFrameLenOffset = mStreamInfo->buf_planes.plane_info;
+
+    if (mStreamInfo->streaming_mode == CAM_STREAMING_MODE_BATCH) {
+        return allocateBatchBufs(&mFrameLenOffset,
+                &mNumBufs, &mRegFlags,
+                &mBufDefs, NULL);
+    }
+
+    //Allocate and map stream info buffer
+    mStreamBufs = mAllocator.allocateStreamBuf(mStreamInfo->stream_type,
+            mFrameLenOffset.frame_len,
+            mFrameLenOffset.mp[0].stride,
+            mFrameLenOffset.mp[0].scanline,
+            mNumBufs);
+
+    if (!mStreamBufs) {
+        ALOGE("%s: Failed to allocate stream buffers", __func__);
+        return NO_MEMORY;
+    }
+
+    for (uint32_t i = 0; i < mNumBufs; i++) {
+        ssize_t bufSize = mStreamBufs->getSize(i);
+        if (BAD_INDEX != bufSize) {
+            rc = mapBuf(CAM_MAPPING_BUF_TYPE_STREAM_BUF, i, -1,
+                    mStreamBufs->getFd(i), (size_t)bufSize, NULL);
+            ALOGE_IF((rc < 0), "%s: map_stream_buf failed: %d", __func__, rc);
+        } else {
+            ALOGE("%s: Bad index %u", __func__, i);
+            rc = BAD_INDEX;
+        }
+        if (rc < 0) {
+            ALOGE("%s: Cleanup after error: %d", __func__, rc);
+            for (uint32_t j = 0; j < i; j++) {
+                unmapBuf(CAM_MAPPING_BUF_TYPE_STREAM_BUF, j, -1, NULL);
+            }
+            mStreamBufs->deallocate();
+            delete mStreamBufs;
+            mStreamBufs = NULL;
+            return INVALID_OPERATION;
+        }
+    }
+
+    //regFlags array is allocated by us,
+    // but consumed and freed by mm-camera-interface
+    mRegFlags = (uint8_t *)malloc(sizeof(uint8_t) * mNumBufs);
+    if (!mRegFlags) {
+        ALOGE("%s: Out of memory", __func__);
+        for (uint32_t i = 0; i < mNumBufs; i++) {
+            unmapBuf(CAM_MAPPING_BUF_TYPE_STREAM_BUF, i, -1, NULL);
+        }
+        mStreamBufs->deallocate();
+        delete mStreamBufs;
+        mStreamBufs = NULL;
+        return NO_MEMORY;
+    }
+    memset(mRegFlags, 0, sizeof(uint8_t) * mNumBufs);
+
+    size_t bufDefsSize = mNumBufs * sizeof(mm_camera_buf_def_t);
+    mBufDefs = (mm_camera_buf_def_t *)malloc(bufDefsSize);
+    if (mBufDefs == NULL) {
+        ALOGE("%s: getRegFlags failed %d", __func__, rc);
+        for (uint32_t i = 0; i < mNumBufs; i++) {
+            unmapBuf(CAM_MAPPING_BUF_TYPE_STREAM_BUF, i, -1, NULL);
+        }
+        mStreamBufs->deallocate();
+        delete mStreamBufs;
+        mStreamBufs = NULL;
+        free(mRegFlags);
+        mRegFlags = NULL;
+        return INVALID_OPERATION;
+    }
+    memset(mBufDefs, 0, bufDefsSize);
+    for (uint32_t i = 0; i < mNumBufs; i++) {
+        mStreamBufs->getBufDef(mFrameLenOffset, mBufDefs[i], i);
+    }
+
+    rc = mStreamBufs->getRegFlags(mRegFlags);
+    if (rc < 0) {
+        ALOGE("%s: getRegFlags failed %d", __func__, rc);
+        for (uint32_t i = 0; i < mNumBufs; i++) {
+            unmapBuf(CAM_MAPPING_BUF_TYPE_STREAM_BUF, i, -1, NULL);
+        }
+        mStreamBufs->deallocate();
+        delete mStreamBufs;
+        mStreamBufs = NULL;
+        free(mBufDefs);
+        mBufDefs = NULL;
+        free(mRegFlags);
+        mRegFlags = NULL;
+        return INVALID_OPERATION;
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : allocateBatchBufs
+ *
+ * DESCRIPTION: allocate stream batch buffers and stream buffers
+ *
+ * PARAMETERS :
+ *   @offset     : offset info of stream buffers
+ *   @num_bufs   : number of buffers allocated
+ *   @initial_reg_flag: flag to indicate if buffer needs to be registered
+ *                      at kernel initially
+ *   @bufs       : output of allocated buffers
+ *   @ops_tbl    : ptr to buf mapping/unmapping ops
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::allocateBatchBufs(cam_frame_len_offset_t *offset,
+        uint8_t *num_bufs, uint8_t **initial_reg_flag,
+        mm_camera_buf_def_t **bufs, mm_camera_map_unmap_ops_tbl_t *ops_tbl)
+{
+    int rc = NO_ERROR;
+    uint8_t *regFlags;
+
+    mFrameLenOffset = *offset;
+
+    CDBG_HIGH("%s : Batch Buffer allocation stream type = %d", __func__, getMyType());
+
+    //Allocate stream batch buffer
+    mStreamBatchBufs = mAllocator.allocateStreamUserBuf (mStreamInfo);
+    if (!mStreamBatchBufs) {
+        ALOGE("%s: Failed to allocate stream batch buffers", __func__);
+        return NO_MEMORY;
+    }
+
+    //map batch buffers
+    for (uint32_t i = 0; i < mStreamBatchBufs->getCnt(); i++) {
+        rc = mapBuf(CAM_MAPPING_BUF_TYPE_STREAM_USER_BUF, i, -1,
+                    mStreamBatchBufs->getFd(i), (size_t)mNumBufs, ops_tbl);
+        if (rc < 0) {
+            ALOGE("Failed to map stream batch buffer");
+            for (uint32_t j = 0; j < i; j++) {
+                unmapBuf(CAM_MAPPING_BUF_TYPE_STREAM_USER_BUF, j, -1, ops_tbl);
+            }
+            mStreamBatchBufs->deallocate();
+            delete mStreamBatchBufs;
+            mStreamBatchBufs = NULL;
+            return NO_MEMORY;
+        }
+    }
+
+    /*calculate stream Buffer count*/
+    mNumPlaneBufs =
+            (mNumBufs * mStreamInfo->user_buf_info.frame_buf_cnt);
+
+    //Allocate stream buffer
+    mStreamBufs = mAllocator.allocateStreamBuf(mStreamInfo->stream_type,
+            mFrameLenOffset.frame_len,mFrameLenOffset.mp[0].stride,
+            mFrameLenOffset.mp[0].scanline,mNumPlaneBufs);
+    if (!mStreamBufs) {
+        ALOGE("%s: Failed to allocate stream buffers", __func__);
+        rc = NO_MEMORY;
+        goto err1;
+    }
+
+    //Map plane stream buffers
+    for (uint32_t i = 0; i < mNumPlaneBufs; i++) {
+        ssize_t bufSize = mStreamBufs->getSize(i);
+        if (BAD_INDEX != bufSize) {
+            rc = mapBuf(CAM_MAPPING_BUF_TYPE_STREAM_BUF, i, -1,
+                    mStreamBufs->getFd(i), (size_t)bufSize, ops_tbl);
+            if (rc < 0) {
+                ALOGE("%s: map_stream_buf failed: %d", __func__, rc);
+                for (uint32_t j = 0; j < i; j++) {
+                    unmapBuf(CAM_MAPPING_BUF_TYPE_STREAM_BUF, j, -1, ops_tbl);
+                }
+                mStreamBufs->deallocate();
+                delete mStreamBufs;
+                mStreamBufs = NULL;
+                rc = INVALID_OPERATION;
+                goto err1;
+            }
+        } else {
+            ALOGE("Failed to retrieve buffer size (bad index)");
+            mStreamBufs->deallocate();
+            delete mStreamBufs;
+            mStreamBufs = NULL;
+            rc = INVALID_OPERATION;
+            goto err1;
+        }
+    }
+
+    CDBG ("%s: BATCH Buf Count = %d, Plane Buf Cnt = %d", __func__,
+            mNumBufs, mNumPlaneBufs);
+
+    //regFlags array is allocated by us, but consumed and freed by mm-camera-interface
+    regFlags = (uint8_t *)malloc(sizeof(uint8_t) * mNumBufs);
+    if (!regFlags) {
+        ALOGE("%s: Out of memory", __func__);
+        for (uint32_t i = 0; i < mNumPlaneBufs; i++) {
+            unmapBuf(CAM_MAPPING_BUF_TYPE_STREAM_BUF, i, -1, ops_tbl);
+        }
+        mStreamBufs->deallocate();
+        delete mStreamBufs;
+        mStreamBufs = NULL;
+        rc = NO_MEMORY;
+        goto err1;
+    }
+    memset(regFlags, 0, sizeof(uint8_t) * mNumBufs);
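+    // All batch (container) buffers are flagged for initial registration with
+    // the kernel.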
+    for (uint32_t i = 0; i < mNumBufs; i++) {
+        regFlags[i] = 1;
+    }
+
+    mBufDefs = (mm_camera_buf_def_t *)malloc(mNumBufs * sizeof(mm_camera_buf_def_t));
+    if (mBufDefs == NULL) {
+        ALOGE("%s: getRegFlags failed %d", __func__, rc);
+        for (uint32_t i = 0; i < mNumPlaneBufs; i++) {
+            unmapBuf(CAM_MAPPING_BUF_TYPE_STREAM_BUF, i, -1, ops_tbl);
+        }
+        mStreamBufs->deallocate();
+        delete mStreamBufs;
+        mStreamBufs = NULL;
+        free(regFlags);
+        regFlags = NULL;
+        rc = INVALID_OPERATION;
+        goto err1;
+    }
+    memset(mBufDefs, 0, mNumBufs * sizeof(mm_camera_buf_def_t));
+
+    mPlaneBufDefs = (mm_camera_buf_def_t *)
+            malloc(mNumPlaneBufs * (sizeof(mm_camera_buf_def_t)));
+    if (mPlaneBufDefs == NULL) {
+        ALOGE("%s : No Memory", __func__);
+        free(regFlags);
+        regFlags = NULL;
+        free(mBufDefs);
+        mBufDefs = NULL;
+        for (uint32_t i = 0; i < mNumPlaneBufs; i++) {
+            unmapBuf(CAM_MAPPING_BUF_TYPE_STREAM_BUF, i, -1, ops_tbl);
+        }
+        mStreamBufs->deallocate();
+        delete mStreamBufs;
+        mStreamBufs = NULL;
+        rc = INVALID_OPERATION;
+        goto err1;
+    }
+    memset(mPlaneBufDefs, 0,
+             mNumPlaneBufs * (sizeof(mm_camera_buf_def_t)));
+
+    for (uint32_t i = 0; i < mStreamInfo->num_bufs; i++) {
+        mStreamBatchBufs->getUserBufDef(mStreamInfo->user_buf_info,
+                mBufDefs[i], i, mFrameLenOffset, mPlaneBufDefs,
+                mStreamBufs);
+    }
+
+    *num_bufs = mNumBufs;
+    *initial_reg_flag = regFlags;
+    *bufs = mBufDefs;
+    CDBG_HIGH("%s: stream type: %d, numBufs: %d mNumPlaneBufs: %d",
+            __func__, mStreamInfo->stream_type, mNumBufs, mNumPlaneBufs);
+
+    return NO_ERROR;
+
+err1:
+    for (uint8_t i = 0; i < mStreamBatchBufs->getCnt(); i++) {
+        unmapBuf(CAM_MAPPING_BUF_TYPE_STREAM_USER_BUF, i, -1, ops_tbl);
+    }
+    mStreamBatchBufs->deallocate();
+    delete mStreamBatchBufs;
+    mStreamBatchBufs = NULL;
+    return rc;
+}
+
+
+/*===========================================================================
+ * FUNCTION   : releaseBuffs
+ *
+ * DESCRIPTION: method to deallocate stream buffers
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::releaseBuffs()
+{
+    int rc = NO_ERROR;
+
+    if (mStreamInfo->streaming_mode == CAM_STREAMING_MODE_BATCH) {
+        return releaseBatchBufs(NULL);
+    }
+
+    if (NULL != mBufDefs) {
+        for (uint32_t i = 0; i < mNumBufs; i++) {
+            rc = unmapBuf(CAM_MAPPING_BUF_TYPE_STREAM_BUF, i, -1, NULL);
+            if (rc < 0) {
+                ALOGE("%s: map_stream_buf failed: %d", __func__, rc);
+            }
+        }
+
+        // mBufDefs just keeps a ptr to the buffers;
+        // mm-camera-interface owns them, so no need to free here
+        mBufDefs = NULL;
+        memset(&mFrameLenOffset, 0, sizeof(mFrameLenOffset));
+    }
+    if (!mStreamBufsAcquired && mStreamBufs != NULL) {
+        mStreamBufs->deallocate();
+        delete mStreamBufs;
+        mStreamBufs = NULL;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseBatchBufs
+ *
+ * DESCRIPTION: method to deallocate stream buffers and batch buffers
+ *
+ * PARAMETERS :
+ *   @ops_tbl    : ptr to buf mapping/unmapping ops
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::releaseBatchBufs(mm_camera_map_unmap_ops_tbl_t *ops_tbl)
+{
+    int rc = NO_ERROR;
+
+    if (NULL != mPlaneBufDefs) {
+        for (uint32_t i = 0; i < mNumPlaneBufs; i++) {
+            rc = unmapBuf(CAM_MAPPING_BUF_TYPE_STREAM_BUF, i, -1, ops_tbl);
+            if (rc < 0) {
+                ALOGE("%s: map_stream_buf failed: %d", __func__, rc);
+            }
+        }
+
+        // mPlaneBufDefs just keeps a ptr to the buffers;
+        // mm-camera-interface owns them, so no need to free here
+        mPlaneBufDefs = NULL;
+        memset(&mFrameLenOffset, 0, sizeof(mFrameLenOffset));
+        mNumPlaneBufs = 0;
+    }
+
+    if (mStreamBufs != NULL) {
+        mStreamBufs->deallocate();
+        delete mStreamBufs;
+        mStreamBufs = NULL;
+    }
+
+    mBufDefs = NULL;
+
+    if (mStreamBatchBufs != NULL) {
+        for (uint8_t i = 0; i < mStreamBatchBufs->getCnt(); i++) {
+            unmapBuf(CAM_MAPPING_BUF_TYPE_STREAM_USER_BUF, i, -1, ops_tbl);
+        }
+        mStreamBatchBufs->deallocate();
+        delete mStreamBatchBufs;
+        mStreamBatchBufs = NULL;
+    }
+    return rc;
+
+}
+
+/*===========================================================================
+ * FUNCTION   : BufAllocRoutine
+ *
+ * DESCRIPTION: function to allocate additional stream buffers
+ *
+ * PARAMETERS :
+ *   @data    : user data ptr
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void *QCameraStream::BufAllocRoutine(void *data)
+{
+    QCameraStream *pme = (QCameraStream *)data;
+    int32_t rc = NO_ERROR;
+
+    CDBG_HIGH("%s: E", __func__);
+    pme->cond_wait();
+    if (pme->mNumBufsNeedAlloc > 0) {
+        uint8_t numBufAlloc = (uint8_t)(pme->mNumBufs - pme->mNumBufsNeedAlloc);
+        rc = pme->mAllocator.allocateMoreStreamBuf(pme->mStreamBufs,
+                                                   pme->mFrameLenOffset.frame_len,
+                                                   pme->mNumBufsNeedAlloc);
+        if (rc == NO_ERROR){
+            for (uint32_t i = numBufAlloc; i < pme->mNumBufs; i++) {
+                ssize_t bufSize = pme->mStreamBufs->getSize(i);
+                if (BAD_INDEX != bufSize) {
+                    rc = pme->m_MemOpsTbl.map_ops(i, -1, pme->mStreamBufs->getFd(i),
+                            (uint32_t)bufSize, CAM_MAPPING_BUF_TYPE_STREAM_BUF,
+                            pme->m_MemOpsTbl.userdata);
+                    if (rc == 0) {
+                        pme->mStreamBufs->getBufDef(pme->mFrameLenOffset, pme->mBufDefs[i], i);
+                        pme->mCamOps->qbuf(pme->mCamHandle, pme->mChannelHandle,
+                                &pme->mBufDefs[i]);
+                    } else {
+                        ALOGE("%s: map_stream_buf %d failed: %d", __func__, rc, i);
+                    }
+                } else {
+                    ALOGE("Failed to retrieve buffer size (bad index)");
+                }
+            }
+
+            pme->mNumBufsNeedAlloc = 0;
+        }
+    }
+    CDBG_HIGH("%s: X", __func__);
+    return NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : cond_signal
+ *
+ * DESCRIPTION: signal if flag "wait_for_cond" is set
+ *
+ *==========================================================================*/
+void QCameraStream::cond_signal()
+{
+    pthread_mutex_lock(&m_lock);
+    if(wait_for_cond == TRUE){
+        wait_for_cond = FALSE;
+        pthread_cond_signal(&m_cond);
+    }
+    pthread_mutex_unlock(&m_lock);
+}
+
+
+/*===========================================================================
+ * FUNCTION   : cond_wait
+ *
+ * DESCRIPTION: wait on if flag "wait_for_cond" is set
+ *
+ *==========================================================================*/
+void QCameraStream::cond_wait()
+{
+    pthread_mutex_lock(&m_lock);
+    while (wait_for_cond == TRUE) {
+        pthread_cond_wait(&m_cond, &m_lock);
+    }
+    pthread_mutex_unlock(&m_lock);
+}
+
+/*===========================================================================
+ * FUNCTION   : putBufs
+ *
+ * DESCRIPTION: deallocate stream buffers
+ *
+ * PARAMETERS :
+ *   @ops_tbl    : ptr to buf mapping/unmapping ops
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::putBufs(mm_camera_map_unmap_ops_tbl_t *ops_tbl)
+{
+    int rc = NO_ERROR;
+
+    if (mBufAllocPid != 0) {
+        CDBG_HIGH("%s: wait for buf allocation thread dead", __func__);
+        pthread_join(mBufAllocPid, NULL);
+        mBufAllocPid = 0;
+        CDBG_HIGH("%s: return from buf allocation thread", __func__);
+    }
+
+    for (uint32_t i = 0; i < mNumBufs; i++) {
+        rc = ops_tbl->unmap_ops(i, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF, ops_tbl->userdata);
+        if (rc < 0) {
+            ALOGE("%s: map_stream_buf failed: %d", __func__, rc);
+        }
+    }
+    mBufDefs = NULL; // mBufDefs just keeps a ptr to the buffers;
+                     // mm-camera-interface owns them, so no need to free here
+    memset(&mFrameLenOffset, 0, sizeof(mFrameLenOffset));
+    if (!mStreamBufsAcquired && mStreamBufs != NULL) {
+        mStreamBufs->deallocate();
+        delete mStreamBufs;
+        mStreamBufs = NULL;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : invalidateBuf
+ *
+ * DESCRIPTION: invalidate a specific stream buffer
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer to invalidate
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::invalidateBuf(uint32_t index)
+{
+    return mStreamBufs->invalidateCache(index);
+}
+
+/*===========================================================================
+ * FUNCTION   : cleanInvalidateBuf
+ *
+ * DESCRIPTION: clean invalidate a specific stream buffer
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer to clean invalidate
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::cleanInvalidateBuf(uint32_t index)
+{
+    return mStreamBufs->cleanInvalidateCache(index);
+}
+
+/*===========================================================================
+ * FUNCTION   : isTypeOf
+ *
+ * DESCRIPTION: helper function to determine if the stream is of the queried type
+ *
+ * PARAMETERS :
+ *   @type    : stream type to be queried
+ *
+ * RETURN     : true/false
+ *==========================================================================*/
+bool QCameraStream::isTypeOf(cam_stream_type_t type)
+{
+    if (mStreamInfo != NULL && (mStreamInfo->stream_type == type)) {
+        return true;
+    } else {
+        return false;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : isOrignalTypeOf
+ *
+ * DESCRIPTION: helper function to determine, for a reprocess stream, whether
+ *              its original input stream is of the queried type
+ *
+ * PARAMETERS :
+ *   @type    : stream type to be queried
+ *
+ * RETURN     : true/false
+ *==========================================================================*/
+bool QCameraStream::isOrignalTypeOf(cam_stream_type_t type)
+{
+    if (mStreamInfo != NULL &&
+        mStreamInfo->stream_type == CAM_STREAM_TYPE_OFFLINE_PROC &&
+        mStreamInfo->reprocess_config.pp_type == CAM_ONLINE_REPROCESS_TYPE &&
+        mStreamInfo->reprocess_config.online.input_stream_type == type) {
+        return true;
+    } else if (
+        mStreamInfo != NULL &&
+        mStreamInfo->stream_type == CAM_STREAM_TYPE_OFFLINE_PROC &&
+        mStreamInfo->reprocess_config.pp_type == CAM_OFFLINE_REPROCESS_TYPE &&
+        mStreamInfo->reprocess_config.offline.input_type == type) {
+        return true;
+    } else {
+        return false;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getMyType
+ *
+ * DESCRIPTION: return stream type
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : stream type
+ *==========================================================================*/
+cam_stream_type_t QCameraStream::getMyType()
+{
+    if (mStreamInfo != NULL) {
+        return mStreamInfo->stream_type;
+    } else {
+        return CAM_STREAM_TYPE_DEFAULT;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getMyOriginalType
+ *
+ * DESCRIPTION: return the original stream type (for reprocess streams, the
+ *              type of the input stream)
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : original stream type
+ *==========================================================================*/
+cam_stream_type_t QCameraStream::getMyOriginalType()
+{
+    if (mStreamInfo != NULL) {
+        if (mStreamInfo->stream_type == CAM_STREAM_TYPE_OFFLINE_PROC &&
+                mStreamInfo->reprocess_config.pp_type == CAM_ONLINE_REPROCESS_TYPE) {
+            return mStreamInfo->reprocess_config.online.input_stream_type;
+        } else if (mStreamInfo->stream_type == CAM_STREAM_TYPE_OFFLINE_PROC &&
+                mStreamInfo->reprocess_config.pp_type == CAM_OFFLINE_REPROCESS_TYPE) {
+            return mStreamInfo->reprocess_config.offline.input_type;
+        } else {
+            return mStreamInfo->stream_type;
+        }
+    } else {
+        return CAM_STREAM_TYPE_DEFAULT;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getFrameOffset
+ *
+ * DESCRIPTION: query stream buffer frame offset info
+ *
+ * PARAMETERS :
+ *   @offset  : reference to struct to store the queried frame offset info
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::getFrameOffset(cam_frame_len_offset_t &offset)
+{
+    if (NULL == mStreamInfo) {
+        return NO_INIT;
+    }
+
+    offset = mFrameLenOffset;
+    if ((ROTATE_90 == mOnlineRotation) || (ROTATE_270 == mOnlineRotation)) {
+        // Re-calculate frame offset in case of online rotation
+        cam_stream_info_t streamInfo = *mStreamInfo;
+        getFrameDimension(streamInfo.dim);
+        calcOffset(&streamInfo);
+        offset = streamInfo.buf_planes.plane_info;
+    }
+
+    return 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : getCropInfo
+ *
+ * DESCRIPTION: query crop info of the stream
+ *
+ * PARAMETERS :
+ *   @crop    : reference to struct to store the queried crop info
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::getCropInfo(cam_rect_t &crop)
+{
+    pthread_mutex_lock(&mCropLock);
+    crop = mCropInfo;
+    pthread_mutex_unlock(&mCropLock);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setCropInfo
+ *
+ * DESCRIPTION: set crop info of the stream
+ *
+ * PARAMETERS :
+ *   @crop    : struct to store new crop info
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::setCropInfo(cam_rect_t crop)
+{
+    pthread_mutex_lock(&mCropLock);
+    mCropInfo = crop;
+    pthread_mutex_unlock(&mCropLock);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : getFrameDimension
+ *
+ * DESCRIPTION: query stream frame dimension info
+ *
+ * PARAMETERS :
+ *   @dim     : reference to struct to store the queried frame dimension
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::getFrameDimension(cam_dimension_t &dim)
+{
+    if (mStreamInfo != NULL) {
+        if ((ROTATE_90 == mOnlineRotation) || (ROTATE_270 == mOnlineRotation)) {
+            dim.width = mStreamInfo->dim.height;
+            dim.height = mStreamInfo->dim.width;
+        } else {
+            dim = mStreamInfo->dim;
+        }
+        return 0;
+    }
+    return -1;
+}
+
+/*===========================================================================
+ * FUNCTION   : getFormat
+ *
+ * DESCRIPTION: query stream format
+ *
+ * PARAMETERS :
+ *   @fmt     : reference to stream format
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::getFormat(cam_format_t &fmt)
+{
+    if (mStreamInfo != NULL) {
+        fmt = mStreamInfo->fmt;
+        return 0;
+    }
+    return -1;
+}
+
+/*===========================================================================
+ * FUNCTION   : getMyServerID
+ *
+ * DESCRIPTION: query server stream ID
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : stream ID from server
+ *==========================================================================*/
+uint32_t QCameraStream::getMyServerID() {
+    if (mStreamInfo != NULL) {
+        return mStreamInfo->stream_svr_id;
+    } else {
+        return 0;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : acquireStreamBufs
+ *
+ * DESCRIPTION: acquire stream buffers and postpone their release.
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::acquireStreamBufs()
+{
+    mStreamBufsAcquired = true;
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : mapBuf
+ *
+ * DESCRIPTION: map stream related buffer to backend server
+ *
+ * PARAMETERS :
+ *   @buf_type : mapping type of buffer
+ *   @buf_idx  : index of buffer
+ *   @plane_idx: plane index
+ *   @fd       : fd of the buffer
+ *   @size     : length of the buffer
+ *   @ops_tbl    : ptr to buf mapping/unmapping ops
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::mapBuf(uint8_t buf_type, uint32_t buf_idx,
+        int32_t plane_idx, int fd, size_t size, mm_camera_map_unmap_ops_tbl_t *ops_tbl)
+{
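+    // When a mapping ops table is supplied (e.g. from the mem_vtbl callbacks),
+    // use it; otherwise map directly through the camera interface ops.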
+    if (ops_tbl != NULL) {
+        return ops_tbl->map_ops(buf_idx, plane_idx, fd,
+                (uint32_t)size, (cam_mapping_buf_type)buf_type, ops_tbl->userdata);
+    } else {
+        return mCamOps->map_stream_buf(mCamHandle, mChannelHandle,
+                mHandle, buf_type, buf_idx, plane_idx,
+                fd, size);
+    }
+
+}
+
+/*===========================================================================
+ * FUNCTION   : unmapBuf
+ *
+ * DESCRIPTION: unmap stream related buffer from backend server
+ *
+ * PARAMETERS :
+ *   @buf_type : mapping type of buffer
+ *   @buf_idx  : index of buffer
+ *   @plane_idx: plane index
+ *   @ops_tbl    : ptr to buf mapping/unmapping ops
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::unmapBuf(uint8_t buf_type, uint32_t buf_idx, int32_t plane_idx,
+        mm_camera_map_unmap_ops_tbl_t *ops_tbl)
+{
+    if (ops_tbl != NULL) {
+        return ops_tbl->unmap_ops(buf_idx, plane_idx,
+                (cam_mapping_buf_type)buf_type, ops_tbl->userdata);
+    } else {
+        return mCamOps->unmap_stream_buf(mCamHandle, mChannelHandle,
+                mHandle, buf_type, buf_idx, plane_idx);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : setParameter
+ *
+ * DESCRIPTION: set stream based parameters
+ *
+ * PARAMETERS :
+ *   @param   : ptr to parameters to be set
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::setParameter(cam_stream_parm_buffer_t &param)
+{
+    int32_t rc = NO_ERROR;
+    pthread_mutex_lock(&mParameterLock);
+    mStreamInfo->parm_buf = param;
+    rc = mCamOps->set_stream_parms(mCamHandle,
+                                   mChannelHandle,
+                                   mHandle,
+                                   &mStreamInfo->parm_buf);
+    if (rc == NO_ERROR) {
+        param = mStreamInfo->parm_buf;
+    }
+    pthread_mutex_unlock(&mParameterLock);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : getParameter
+ *
+ * DESCRIPTION: get stream based parameters
+ *
+ * PARAMETERS :
+ *   @param   : ptr to parameters to be read
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::getParameter(cam_stream_parm_buffer_t &param)
+{
+    int32_t rc = NO_ERROR;
+    pthread_mutex_lock(&mParameterLock);
+    mStreamInfo->parm_buf = param;
+    rc = mCamOps->get_stream_parms(mCamHandle,
+                                   mChannelHandle,
+                                   mHandle,
+                                   &mStreamInfo->parm_buf);
+    if (rc == NO_ERROR) {
+        param = mStreamInfo->parm_buf;
+    }
+    pthread_mutex_unlock(&mParameterLock);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseFrameData
+ *
+ * DESCRIPTION: callback function to release frame data node
+ *
+ * PARAMETERS :
+ *   @data      : ptr to post process input data
+ *   @user_data : user data ptr (QCameraStream)
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCameraStream::releaseFrameData(void *data, void *user_data)
+{
+    QCameraStream *pme = (QCameraStream *)user_data;
+    mm_camera_super_buf_t *frame = (mm_camera_super_buf_t *)data;
+    if (NULL != pme) {
+        pme->bufDone(frame->bufs[0]->buf_idx);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : configStream
+ *
+ * DESCRIPTION: send stream configuration to back end
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraStream::configStream()
+{
+    int rc = NO_ERROR;
+
+    // Configure the stream
+    mm_camera_stream_config_t stream_config;
+    stream_config.stream_info = mStreamInfo;
+    stream_config.mem_vtbl = mMemVtbl;
+    stream_config.stream_cb = dataNotifyCB;
+    stream_config.padding_info = mPaddingInfo;
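+    // userdata is handed back to the static dataNotifyCB so the callback can
+    // recover this stream instance.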
+    stream_config.userdata = this;
+    rc = mCamOps->config_stream(mCamHandle,
+                mChannelHandle, mHandle, &stream_config);
+    if (rc < 0) {
+        ALOGE("Failed to config stream, rc = %d", rc);
+        mCamOps->unmap_stream_buf(mCamHandle,
+                mChannelHandle,
+                mHandle,
+                CAM_MAPPING_BUF_TYPE_STREAM_INFO,
+                0,
+                -1);
+        return UNKNOWN_ERROR;
+    }
+
+    return rc;
+}
+
+}; // namespace qcamera
diff --git a/camera/QCamera2/HAL/QCameraStream.h b/camera/QCamera2/HAL/QCameraStream.h
new file mode 100644
index 0000000..bc5fcee
--- /dev/null
+++ b/camera/QCamera2/HAL/QCameraStream.h
@@ -0,0 +1,221 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCAMERA_STREAM_H__
+#define __QCAMERA_STREAM_H__
+
+#include <hardware/camera.h>
+#include "QCameraCmdThread.h"
+#include "QCameraMem.h"
+#include "QCameraAllocator.h"
+
+extern "C" {
+#include <mm_camera_interface.h>
+}
+
+namespace qcamera {
+
+class QCameraStream;
+typedef void (*stream_cb_routine)(mm_camera_super_buf_t *frame,
+                                  QCameraStream *stream,
+                                  void *userdata);
+
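+/* QCameraStream wraps a single mm-camera-interface stream: it owns the stream
+ * info and stream buffer memory, runs a dedicated thread for data callbacks,
+ * and exposes buffer map/unmap and stream parameter helpers to its owner. */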
+class QCameraStream
+{
+public:
+    QCameraStream(QCameraAllocator &allocator,
+            uint32_t camHandle, uint32_t chId,
+            mm_camera_ops_t *camOps, cam_padding_info_t *paddingInfo,
+            bool deffered = false, cam_rotation_t online_rotation = ROTATE_0);
+    virtual ~QCameraStream();
+    virtual int32_t init(QCameraHeapMemory *streamInfoBuf,
+            QCameraHeapMemory *miscBuf,
+            uint8_t minStreamBufNum,
+            stream_cb_routine stream_cb,
+            void *userdata,
+            bool bDynallocBuf);
+    virtual int32_t processZoomDone(preview_stream_ops_t *previewWindow,
+                                    cam_crop_data_t &crop_info);
+    virtual int32_t bufDone(uint32_t index);
+    virtual int32_t bufDone(const void *opaque, bool isMetaData);
+    virtual int32_t processDataNotify(mm_camera_super_buf_t *bufs);
+    virtual int32_t start();
+    virtual int32_t stop();
+
+    /* Used for deferred allocation of buffers */
+    virtual int32_t allocateBuffers();
+    virtual int32_t releaseBuffs();
+
+    static void dataNotifyCB(mm_camera_super_buf_t *recvd_frame, void *userdata);
+    static void *dataProcRoutine(void *data);
+    static void *BufAllocRoutine(void *data);
+    uint32_t getMyHandle() const {return mHandle;}
+    bool isTypeOf(cam_stream_type_t type);
+    bool isOrignalTypeOf(cam_stream_type_t type);
+    int32_t getFrameOffset(cam_frame_len_offset_t &offset);
+    int32_t getCropInfo(cam_rect_t &crop);
+    int32_t setCropInfo(cam_rect_t crop);
+    int32_t getFrameDimension(cam_dimension_t &dim);
+    int32_t getFormat(cam_format_t &fmt);
+    QCameraMemory *getStreamBufs() {return mStreamBufs;};
+    QCameraHeapMemory *getStreamInfoBuf() {return mStreamInfoBuf;};
+    QCameraHeapMemory *getMiscBuf() {return mMiscBuf;};
+    uint32_t getMyServerID();
+    cam_stream_type_t getMyType();
+    cam_stream_type_t getMyOriginalType();
+    int32_t acquireStreamBufs();
+
+    int32_t mapBuf(uint8_t buf_type, uint32_t buf_idx,
+            int32_t plane_idx, int fd, size_t size,
+            mm_camera_map_unmap_ops_tbl_t *ops_tbl = NULL);
+    int32_t unmapBuf(uint8_t buf_type, uint32_t buf_idx, int32_t plane_idx,
+            mm_camera_map_unmap_ops_tbl_t *ops_tbl = NULL);
+    int32_t setParameter(cam_stream_parm_buffer_t &param);
+    int32_t getParameter(cam_stream_parm_buffer_t &param);
+    int32_t syncRuntimeParams();
+    cam_stream_parm_buffer_t getOutputCrop() { return m_OutputCrop;};
+    cam_stream_parm_buffer_t getImgProp() { return m_ImgProp;};
+
+    static void releaseFrameData(void *data, void *user_data);
+    int32_t configStream();
+    bool isDeffered() const { return mDefferedAllocation; }
+    void deleteStream();
+
+    uint8_t getBufferCount() { return mNumBufs; }
+    uint32_t getChannelHandle() { return mChannelHandle; }
+    int32_t getNumQueuedBuf();
+
+    uint32_t mDumpFrame;
+    uint32_t mDumpMetaFrame;
+    uint32_t mDumpSkipCnt;
+
+    void cond_wait();
+    void cond_signal();
+
+private:
+    uint32_t mCamHandle;
+    uint32_t mChannelHandle;
+    uint32_t mHandle; // stream handle from mm-camera-interface
+    mm_camera_ops_t *mCamOps;
+    cam_stream_info_t *mStreamInfo; // ptr to stream info buf
+    mm_camera_stream_mem_vtbl_t mMemVtbl;
+    uint8_t mNumBufs;
+    uint8_t mNumPlaneBufs;
+    uint8_t mNumBufsNeedAlloc;
+    uint8_t *mRegFlags;
+    stream_cb_routine mDataCB;
+    void *mUserData;
+
+    QCameraQueue     mDataQ;
+    QCameraCmdThread mProcTh; // thread for dataCB
+
+    QCameraHeapMemory *mStreamInfoBuf;
+    QCameraHeapMemory *mMiscBuf;
+    QCameraMemory *mStreamBufs;
+    QCameraMemory *mStreamBatchBufs;
+    QCameraAllocator &mAllocator;
+    mm_camera_buf_def_t *mBufDefs;
+    mm_camera_buf_def_t *mPlaneBufDefs;
+    cam_frame_len_offset_t mFrameLenOffset;
+    cam_padding_info_t mPaddingInfo;
+    cam_rect_t mCropInfo;
+    cam_rotation_t mOnlineRotation;
+    pthread_mutex_t mCropLock; // lock to protect crop info
+    pthread_mutex_t mParameterLock; // lock to sync access to parameters
+    bool mStreamBufsAcquired;
+    bool m_bActive; // if stream mProcTh is active
+    bool mDynBufAlloc; // allow buf allocation in 2 steps
+    pthread_t mBufAllocPid;
+    mm_camera_map_unmap_ops_tbl_t m_MemOpsTbl;
+    cam_stream_parm_buffer_t m_OutputCrop;
+    cam_stream_parm_buffer_t m_ImgProp;
+
+    static int32_t get_bufs(
+                     cam_frame_len_offset_t *offset,
+                     uint8_t *num_bufs,
+                     uint8_t **initial_reg_flag,
+                     mm_camera_buf_def_t **bufs,
+                     mm_camera_map_unmap_ops_tbl_t *ops_tbl,
+                     void *user_data);
+
+    static int32_t get_bufs_deffered(
+            cam_frame_len_offset_t *offset,
+            uint8_t *num_bufs,
+            uint8_t **initial_reg_flag,
+            mm_camera_buf_def_t **bufs,
+            mm_camera_map_unmap_ops_tbl_t *ops_tbl,
+            void *user_data);
+
+    static int32_t put_bufs(
+                     mm_camera_map_unmap_ops_tbl_t *ops_tbl,
+                     void *user_data);
+
+    static int32_t put_bufs_deffered(
+            mm_camera_map_unmap_ops_tbl_t *ops_tbl,
+            void *user_data);
+
+    static int32_t invalidate_buf(uint32_t index, void *user_data);
+    static int32_t clean_invalidate_buf(uint32_t index, void *user_data);
+
+    int32_t getBufs(cam_frame_len_offset_t *offset,
+                     uint8_t *num_bufs,
+                     uint8_t **initial_reg_flag,
+                     mm_camera_buf_def_t **bufs,
+                     mm_camera_map_unmap_ops_tbl_t *ops_tbl);
+    int32_t putBufs(mm_camera_map_unmap_ops_tbl_t *ops_tbl);
+
+    /* Used for deferred allocation of buffers */
+    int32_t allocateBatchBufs(cam_frame_len_offset_t *offset,
+            uint8_t *num_bufs, uint8_t **initial_reg_flag,
+            mm_camera_buf_def_t **bufs, mm_camera_map_unmap_ops_tbl_t *ops_tbl);
+
+    int32_t releaseBatchBufs(mm_camera_map_unmap_ops_tbl_t *ops_tbl);
+
+    int32_t invalidateBuf(uint32_t index);
+    int32_t cleanInvalidateBuf(uint32_t index);
+    int32_t calcOffset(cam_stream_info_t *streamInfo);
+    int32_t unmapStreamInfoBuf();
+    int32_t releaseStreamInfoBuf();
+    int32_t releaseMiscBuf();
+    int32_t mapBuf(QCameraMemory *heapBuf, cam_mapping_buf_type bufType,
+            mm_camera_map_unmap_ops_tbl_t *ops_tbl = NULL);
+    int32_t unMapBuf(QCameraMemory *heapBuf, cam_mapping_buf_type bufType,
+            mm_camera_map_unmap_ops_tbl_t *ops_tbl = NULL);
+
+    bool mDefferedAllocation;
+
+    bool wait_for_cond;
+    pthread_mutex_t m_lock;
+    pthread_cond_t m_cond;
+
+};
+
+}; // namespace qcamera
+
+#endif /* __QCAMERA_STREAM_H__ */
diff --git a/camera/QCamera2/HAL/QCameraThermalAdapter.cpp b/camera/QCamera2/HAL/QCameraThermalAdapter.cpp
new file mode 100644
index 0000000..10cd847
--- /dev/null
+++ b/camera/QCamera2/HAL/QCameraThermalAdapter.cpp
@@ -0,0 +1,172 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define LOG_TAG "QCameraThermalAdapter"
+
+#include <dlfcn.h>
+#include <stdlib.h>
+#include <utils/Errors.h>
+
+#include "QCamera2HWI.h"
+#include "QCameraThermalAdapter.h"
+
+using namespace android;
+
+namespace qcamera {
+
+
+QCameraThermalAdapter& QCameraThermalAdapter::getInstance()
+{
+    static QCameraThermalAdapter instance;
+    return instance;
+}
+
+QCameraThermalAdapter::QCameraThermalAdapter() :
+                                        mCallback(NULL),
+                                        mHandle(NULL),
+                                        mRegister(NULL),
+                                        mUnregister(NULL),
+                                        mCameraHandle(0),
+                                        mCamcorderHandle(0)
+{
+}
+
+int QCameraThermalAdapter::init(QCameraThermalCallback *thermalCb)
+{
+    const char *error = NULL;
+    int rc = NO_ERROR;
+
+    CDBG("%s E", __func__);
+    mHandle = dlopen("/vendor/lib/libthermalclient.so", RTLD_NOW);
+    if (!mHandle) {
+        error = dlerror();
+        ALOGE("%s: dlopen failed with error %s",
+                    __func__, error ? error : "");
+        rc = UNKNOWN_ERROR;
+        goto error;
+    }
+    *(void **)&mRegister = dlsym(mHandle, "thermal_client_register_callback");
+    if (!mRegister) {
+        error = dlerror();
+        ALOGE("%s: dlsym failed with error code %s",
+                    __func__, error ? error: "");
+        rc = UNKNOWN_ERROR;
+        goto error2;
+    }
+    *(void **)&mUnregister = dlsym(mHandle, "thermal_client_unregister_callback");
+    if (!mUnregister) {
+        error = dlerror();
+        ALOGE("%s: dlsym failed with error code %s",
+                    __func__, error ? error: "");
+        rc = UNKNOWN_ERROR;
+        goto error2;
+    }
+
+    mCallback = thermalCb;
+
+    // Register camera and camcorder callbacks
+    mCameraHandle = mRegister(mStrCamera, thermalCallback, NULL);
+    if (mCameraHandle < 0) {
+        ALOGE("%s: thermal_client_register_callback failed %d",
+                        __func__, mCameraHandle);
+        rc = UNKNOWN_ERROR;
+        goto error2;
+    }
+    mCamcorderHandle = mRegister(mStrCamcorder, thermalCallback, NULL);
+    if (mCamcorderHandle < 0) {
+        ALOGE("%s: thermal_client_register_callback failed %d",
+                        __func__, mCamcorderHandle);
+        rc = UNKNOWN_ERROR;
+        goto error3;
+    }
+
+    CDBG("%s X", __func__);
+    return rc;
+
+error3:
+    mCamcorderHandle = 0;
+    mUnregister(mCameraHandle);
+error2:
+    mCameraHandle = 0;
+    dlclose(mHandle);
+    mHandle = NULL;
+error:
+    CDBG("%s X", __func__);
+    return rc;
+}
+
+void QCameraThermalAdapter::deinit()
+{
+    CDBG("%s E", __func__);
+    if (mUnregister) {
+        if (mCameraHandle) {
+            mUnregister(mCameraHandle);
+            mCameraHandle = 0;
+        }
+        if (mCamcorderHandle) {
+            mUnregister(mCamcorderHandle);
+            mCamcorderHandle = 0;
+        }
+    }
+    if (mHandle)
+        dlclose(mHandle);
+
+    mHandle = NULL;
+    mRegister = NULL;
+    mUnregister = NULL;
+    mCallback = NULL;
+    CDBG("%s X", __func__);
+}
+
+char QCameraThermalAdapter::mStrCamera[] = "camera";
+char QCameraThermalAdapter::mStrCamcorder[] = "camcorder";
+
+int QCameraThermalAdapter::thermalCallback(int level,
+                void *userdata, void *data)
+{
+    int rc = 0;
+    CDBG("%s E", __func__);
+    QCameraThermalCallback *mcb = getInstance().mCallback;
+
+    if (mcb) {
+        mcb->setThermalLevel((qcamera_thermal_level_enum_t) level);
+        rc = mcb->thermalEvtHandle(mcb->getThermalLevel(), userdata, data);
+    }
+    CDBG("%s X", __func__);
+    return rc;
+}
+
+qcamera_thermal_level_enum_t *QCameraThermalCallback::getThermalLevel() {
+    return &mLevel;
+}
+
+void QCameraThermalCallback::setThermalLevel(qcamera_thermal_level_enum_t level) {
+    mLevel = level;
+}
+}; //namespace qcamera
diff --git a/camera/QCamera2/HAL/QCameraThermalAdapter.h b/camera/QCamera2/HAL/QCameraThermalAdapter.h
new file mode 100644
index 0000000..5b652a8
--- /dev/null
+++ b/camera/QCamera2/HAL/QCameraThermalAdapter.h
@@ -0,0 +1,90 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCAMERA_THERMAL_ADAPTER__
+#define __QCAMERA_THERMAL_ADAPTER__
+
+namespace qcamera {
+
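+// Thermal mitigation levels delivered through the thermal client callback;
+// higher values indicate progressively stronger mitigation, up to shutdown.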
+typedef enum {
+    QCAMERA_THERMAL_NO_ADJUSTMENT = 0,
+    QCAMERA_THERMAL_SLIGHT_ADJUSTMENT,
+    QCAMERA_THERMAL_BIG_ADJUSTMENT,
+    QCAMERA_THERMAL_SHUTDOWN
+} qcamera_thermal_level_enum_t;
+
+typedef enum {
+    QCAMERA_THERMAL_ADJUST_FPS,
+    QCAMERA_THERMAL_ADJUST_FRAMESKIP,
+} qcamera_thermal_mode;
+
+class QCameraThermalCallback
+{
+public:
+    virtual int thermalEvtHandle(qcamera_thermal_level_enum_t *level,
+            void *userdata, void *data) = 0;
+    virtual ~QCameraThermalCallback() {}
+    qcamera_thermal_level_enum_t *getThermalLevel();
+    void setThermalLevel(qcamera_thermal_level_enum_t level);
+
+private:
+    qcamera_thermal_level_enum_t mLevel;
+};
+
+class QCameraThermalAdapter
+{
+public:
+    static QCameraThermalAdapter& getInstance();
+
+    int init(QCameraThermalCallback *thermalCb);
+    void deinit();
+
+private:
+    static char mStrCamera[];
+    static char mStrCamcorder[];
+
+    static int thermalCallback(int level, void *userdata, void *data);
+
+    QCameraThermalCallback *mCallback;
+    void *mHandle;
+    int (*mRegister)(char *name,
+            int (*callback)(int, void *userdata, void *data), void *data);
+    int (*mUnregister)(int handle);
+    int mCameraHandle;
+    int mCamcorderHandle;
+
+    QCameraThermalAdapter();
+    QCameraThermalAdapter(QCameraThermalAdapter const& copy); // not implemented
+    QCameraThermalAdapter& operator=(QCameraThermalAdapter const& copy); // not implemented
+
+};
+
+}; // namespace qcamera
+
+#endif /* __QCAMERA_THERMAL_ADAPTER__ */
diff --git a/camera/QCamera2/HAL/wrapper/QualcommCamera.cpp b/camera/QCamera2/HAL/wrapper/QualcommCamera.cpp
new file mode 100644
index 0000000..d55406f
--- /dev/null
+++ b/camera/QCamera2/HAL/wrapper/QualcommCamera.cpp
@@ -0,0 +1,450 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define ALOG_NIDEBUG 0
+#define LOG_TAG "QualcommCamera"
+#include <utils/Log.h>
+#include <utils/threads.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <binder/IMemory.h>
+#include <binder/MemoryBase.h>
+#include <binder/MemoryHeapBase.h>
+#include <utils/RefBase.h>
+
+#include "QualcommCamera.h"
+#include "QCamera2Factory.h"
+#include "QCamera2HWI.h"
+
+
+extern "C" {
+#include <sys/time.h>
+}
+
+/* HAL function implementation goes here*/
+
+/**
+ * The functions need to be provided by the camera HAL.
+ *
+ * If getNumberOfCameras() returns N, the valid cameraId for getCameraInfo()
+ * and openCameraHardware() is 0 to N-1.
+ */
+
+
+static hw_module_methods_t camera_module_methods = {
+    open: camera_device_open,
+};
+
+static hw_module_t camera_common = {
+    tag: HARDWARE_MODULE_TAG,
+    module_api_version: CAMERA_MODULE_API_VERSION_1_0,
+    hal_api_version: HARDWARE_HAL_API_VERSION,
+    id: CAMERA_HARDWARE_MODULE_ID,
+    name: "QCamera Module",
+    author: "Quic on behalf of CAF",
+    methods: &camera_module_methods,
+    dso: NULL,
+    reserved:  {0},
+};
+
+using namespace qcamera;
+namespace android {
+
+typedef struct {
+    camera_device hw_dev;
+    QCamera2HardwareInterface *hardware;
+    int camera_released;
+    int cameraId;
+} camera_hardware_t;
+
+typedef struct {
+  camera_memory_t mem;
+  int32_t msgType;
+  sp<IMemory> dataPtr;
+  void* user;
+  unsigned int index;
+} q_cam_memory_t;
+
+QCamera2HardwareInterface *util_get_Hal_obj( struct camera_device * device)
+{
+    QCamera2HardwareInterface *hardware = NULL;
+    if(device && device->priv){
+        camera_hardware_t *camHal = (camera_hardware_t *)device->priv;
+        hardware = camHal->hardware;
+    }
+    return hardware;
+}
+
+extern "C" int get_number_of_cameras()
+{
+    /* try to query every time we get the call!*/
+
+    ALOGE("Q%s: E", __func__);
+    return QCamera2Factory::get_number_of_cameras();
+}
+
+extern "C" int get_camera_info(int camera_id, struct camera_info *info)
+{
+    int rc = -1;
+    ALOGE("Q%s: E", __func__);
+
+    if(info) {
+        rc = QCamera2Factory::get_camera_info(camera_id, info);
+    }
+    CDBG("Q%s: X", __func__);
+    return rc;
+}
+
+
+/* HAL should return NULL if it fails to open camera hardware. */
+extern "C" int  camera_device_open(
+  const struct hw_module_t* module, const char* id,
+          struct hw_device_t** hw_device)
+{
+    int rc = -1;
+    camera_device *device = NULL;
+
+    if(module && id && hw_device) {
+        if (!strcmp(module->name, camera_common.name)) {
+            int cameraId = atoi(id);
+
+            camera_hardware_t *camHal =
+                (camera_hardware_t *) malloc(sizeof (camera_hardware_t));
+            if(!camHal) {
+                *hw_device = NULL;
+                ALOGE("%s:  end in no mem", __func__);
+                return rc;
+            }
+            /* we have the camera_hardware obj malloced */
+            memset(camHal, 0, sizeof (camera_hardware_t));
+            camHal->hardware = new QCamera2HardwareInterface((uint32_t)cameraId);
+            if (camHal->hardware) {
+                camHal->cameraId = cameraId;
+                device = &camHal->hw_dev;
+                device->common.close = close_camera_device;
+                device->ops = &QCamera2HardwareInterface::mCameraOps;
+                device->priv = (void *)camHal;
+                rc =  0;
+            } else {
+                free(camHal);
+                device = NULL;
+                goto EXIT;
+            }
+        }
+    }
+    /* Pass the actual hw_device ptr to the framework so that member-of()
+     * style macros can be applied to the returned device. */
+    *hw_device = (device != NULL) ? (hw_device_t *)&device->common : NULL;
+
+EXIT:
+
+    ALOGE("%s:  end rc %d", __func__, rc);
+    return rc;
+}
+
+extern "C"  int close_camera_device( hw_device_t *hw_dev)
+{
+    ALOGE("Q%s: device =%p E", __func__, hw_dev);
+    int rc =  -1;
+    camera_device_t *device = (camera_device_t *)hw_dev;
+
+    if(device) {
+        camera_hardware_t *camHal = (camera_hardware_t *)device->priv;
+        if(camHal ) {
+            QCamera2HardwareInterface *hardware = util_get_Hal_obj( device);
+            if(!camHal->camera_released) {
+                if(hardware != NULL) {
+                    hardware->release(device);
+                }
+            }
+            if(hardware != NULL)
+                delete hardware;
+            free(camHal);
+        }
+        rc = 0;
+    }
+    return rc;
+}
+
+
+int set_preview_window(struct camera_device * device,
+        struct preview_stream_ops *window)
+{
+    int rc = -1;
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+
+    if(hardware != NULL) {
+        rc = hardware->set_preview_window(device, window);
+    }
+    return rc;
+}
+
+void set_CallBacks(struct camera_device * device,
+        camera_notify_callback notify_cb,
+        camera_data_callback data_cb,
+        camera_data_timestamp_callback data_cb_timestamp,
+        camera_request_memory get_memory,
+        void *user)
+{
+    ALOGE("Q%s: E", __func__);
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL){
+        hardware->set_CallBacks(device, notify_cb,data_cb, data_cb_timestamp, get_memory, user);
+    }
+}
+
+void enable_msg_type(struct camera_device * device, int32_t msg_type)
+{
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL){
+        hardware->enable_msg_type(device, msg_type);
+    }
+}
+
+void disable_msg_type(struct camera_device * device, int32_t msg_type)
+{
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    ALOGE("Q%s: E", __func__);
+    if(hardware != NULL){
+        hardware->disable_msg_type(device, msg_type);
+    }
+}
+
+int msg_type_enabled(struct camera_device * device, int32_t msg_type)
+{
+    ALOGE("Q%s: E", __func__);
+    int rc = -1;
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL){
+        rc = hardware->msg_type_enabled(device, msg_type);
+    }
+    return rc;
+}
+
+int start_preview(struct camera_device * device)
+{
+    ALOGE("Q%s: E", __func__);
+    int rc = -1;
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL){
+        rc = hardware->start_preview(device);
+    }
+    ALOGE("Q%s: X", __func__);
+    return rc;
+}
+
+void stop_preview(struct camera_device * device)
+{
+    ALOGE("Q%s: E", __func__);
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL){
+        hardware->stop_preview(device);
+    }
+}
+
+int preview_enabled(struct camera_device * device)
+{
+    ALOGE("Q%s: E", __func__);
+    int rc = -1;
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL){
+        rc = hardware->preview_enabled(device);
+    }
+    return rc;
+}
+
+int store_meta_data_in_buffers(struct camera_device * device, int enable)
+{
+    ALOGE("Q%s: E", __func__);
+    int rc = -1;
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL){
+        rc = hardware->store_meta_data_in_buffers(device, enable);
+    }
+    return rc;
+}
+
+int start_recording(struct camera_device * device)
+{
+    ALOGE("Q%s: E", __func__);
+    int rc = -1;
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL){
+        rc = hardware->start_recording(device);
+    }
+    return rc;
+}
+
+void stop_recording(struct camera_device * device)
+{
+    ALOGE("Q%s: E", __func__);
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL){
+        hardware->stop_recording(device);
+    }
+}
+
+int recording_enabled(struct camera_device * device)
+{
+    ALOGE("Q%s: E", __func__);
+    int rc = -1;
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL){
+        rc = hardware->recording_enabled(device);
+    }
+    return rc;
+}
+
+void release_recording_frame(struct camera_device * device,
+                const void *opaque)
+{
+    CDBG("Q%s: E", __func__);
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL){
+        hardware->release_recording_frame(device, opaque);
+    }
+}
+
+int auto_focus(struct camera_device * device)
+{
+    ALOGE("Q%s: E", __func__);
+    int rc = -1;
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL){
+        rc = hardware->auto_focus(device);
+    }
+    return rc;
+}
+
+int cancel_auto_focus(struct camera_device * device)
+{
+    ALOGE("Q%s: E", __func__);
+    int rc = -1;
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL){
+        rc = hardware->cancel_auto_focus(device);
+    }
+    return rc;
+}
+
+int take_picture(struct camera_device * device)
+{
+    ALOGE("Q%s: E", __func__);
+    int rc = -1;
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL){
+        rc = hardware->take_picture(device);
+    }
+    return rc;
+}
+
+int cancel_picture(struct camera_device * device)
+{
+    ALOGE("Q%s: E", __func__);
+    int rc = -1;
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL){
+        rc = hardware->cancel_picture(device);
+    }
+    return rc;
+}
+
+int set_parameters(struct camera_device * device, const char *parms)
+{
+    ALOGE("Q%s: E", __func__);
+    int rc = -1;
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL && parms){
+        rc = hardware->set_parameters(device, parms);
+    }
+    return rc;
+}
+
+char* get_parameters(struct camera_device * device)
+{
+    ALOGE("Q%s: E", __func__);
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL){
+        char *parms = NULL;
+        parms = hardware->get_parameters(device);
+        return parms;
+    }
+    return NULL;
+}
+
+void put_parameters(struct camera_device * device, char *parm)
+{
+    ALOGE("Q%s: E", __func__);
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL){
+        hardware->put_parameters(device, parm);
+    }
+}
+
+int send_command(struct camera_device * device,
+            int32_t cmd, int32_t arg1, int32_t arg2)
+{
+    ALOGE("Q%s: E", __func__);
+    int rc = -1;
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL){
+        rc = hardware->send_command(device, cmd, arg1, arg2);
+    }
+    return rc;
+}
+
+void release(struct camera_device * device)
+{
+    ALOGE("Q%s: E", __func__);
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL){
+        camera_hardware_t *camHal = (camera_hardware_t *)device->priv;
+        hardware->release(device);
+        camHal->camera_released = true;
+    }
+}
+
+int dump(struct camera_device * device, int fd)
+{
+    ALOGE("Q%s: E", __func__);
+    int rc = -1;
+    QCamera2HardwareInterface *hardware = util_get_Hal_obj(device);
+    if(hardware != NULL){
+        rc = hardware->dump(device, fd);
+    }
+    return rc;
+}
+
+}; // namespace android
diff --git a/camera/QCamera2/HAL/wrapper/QualcommCamera.h b/camera/QCamera2/HAL/wrapper/QualcommCamera.h
new file mode 100644
index 0000000..f3def21
--- /dev/null
+++ b/camera/QCamera2/HAL/wrapper/QualcommCamera.h
@@ -0,0 +1,107 @@
+/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef ANDROID_HARDWARE_QUALCOMM_CAMERA_H
+#define ANDROID_HARDWARE_QUALCOMM_CAMERA_H
+
+
+#include "QCamera2HWI.h"
+
+extern "C" {
+
+  int get_number_of_cameras();
+  int get_camera_info(int camera_id, struct camera_info *info);
+
+  int camera_device_open(const struct hw_module_t* module, const char* id,
+          struct hw_device_t** device);
+
+  hw_device_t * open_camera_device(int cameraId);
+
+  int close_camera_device( hw_device_t *);
+
+namespace android {
+  int set_preview_window(struct camera_device *,
+          struct preview_stream_ops *window);
+  void set_CallBacks(struct camera_device *,
+          camera_notify_callback notify_cb,
+          camera_data_callback data_cb,
+          camera_data_timestamp_callback data_cb_timestamp,
+          camera_request_memory get_memory,
+          void *user);
+
+  void enable_msg_type(struct camera_device *, int32_t msg_type);
+
+  void disable_msg_type(struct camera_device *, int32_t msg_type);
+  int msg_type_enabled(struct camera_device *, int32_t msg_type);
+
+  int start_preview(struct camera_device *);
+
+  void stop_preview(struct camera_device *);
+
+  int preview_enabled(struct camera_device *);
+  int store_meta_data_in_buffers(struct camera_device *, int enable);
+
+  int start_recording(struct camera_device *);
+
+  void stop_recording(struct camera_device *);
+
+  int recording_enabled(struct camera_device *);
+
+  void release_recording_frame(struct camera_device *,
+                  const void *opaque);
+
+  int auto_focus(struct camera_device *);
+
+  int cancel_auto_focus(struct camera_device *);
+
+  int take_picture(struct camera_device *);
+
+  int cancel_picture(struct camera_device *);
+
+  int set_parameters(struct camera_device *, const char *parms);
+
+  char* get_parameters(struct camera_device *);
+
+  void put_parameters(struct camera_device *, char *);
+
+  int send_command(struct camera_device *,
+              int32_t cmd, int32_t arg1, int32_t arg2);
+
+  void release(struct camera_device *);
+
+  int dump(struct camera_device *, int fd);
+
+
+
+}; // namespace android
+
+} //extern "C"
+
+#endif
+
diff --git a/camera/QCamera2/HAL3/QCamera3Channel.cpp b/camera/QCamera2/HAL3/QCamera3Channel.cpp
new file mode 100644
index 0000000..22adde5
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3Channel.cpp
@@ -0,0 +1,4431 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+#define LOG_TAG "QCamera3Channel"
+//#define LOG_NDEBUG 0
+#include <fcntl.h>
+#include <stdlib.h>
+#include <cstdlib>
+#include <stdio.h>
+#include <string.h>
+#include <linux/videodev2.h>
+#include <hardware/camera3.h>
+#include <system/camera_metadata.h>
+#include <gralloc_priv.h>
+#include <utils/Log.h>
+#include <utils/Errors.h>
+#include <utils/Trace.h>
+#include <cutils/properties.h>
+#include "QCamera3Channel.h"
+#include "QCamera3HWI.h"
+
+using namespace android;
+
+
+namespace qcamera {
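+/* Default HAL pixel formats used when configuring streams of each type
+ * (see, e.g., translateStreamTypeAndFormat() below for how framework
+ * formats map onto them). */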
+#define VIDEO_FORMAT    CAM_FORMAT_YUV_420_NV12
+#define SNAPSHOT_FORMAT CAM_FORMAT_YUV_420_NV21
+#define PREVIEW_FORMAT  CAM_FORMAT_YUV_420_NV12_VENUS
+#define DEFAULT_FORMAT  CAM_FORMAT_YUV_420_NV21
+#define CALLBACK_FORMAT CAM_FORMAT_YUV_420_NV21
+#define RAW_FORMAT      CAM_FORMAT_BAYER_MIPI_RAW_10BPP_GBRG
+#define IS_BUFFER_ERROR(x) (((x) & V4L2_QCOM_BUF_DATA_CORRUPT) == V4L2_QCOM_BUF_DATA_CORRUPT)
+
+/*===========================================================================
+ * FUNCTION   : QCamera3Channel
+ *
+ * DESCRIPTION: constructor of QCamera3Channel
+ *
+ * PARAMETERS :
+ *   @cam_handle       : camera handle
+ *   @channel_handle   : channel handle
+ *   @cam_ops          : ptr to camera ops table
+ *   @cb_routine       : callback routine to frame aggregator
+ *   @paddingInfo      : stream padding info
+ *   @postprocess_mask : post-process feature mask
+ *   @userData         : HWI handle
+ *   @numBuffers       : number of max dequeued buffers
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCamera3Channel::QCamera3Channel(uint32_t cam_handle,
+                               uint32_t channel_handle,
+                               mm_camera_ops_t *cam_ops,
+                               channel_cb_routine cb_routine,
+                               cam_padding_info_t *paddingInfo,
+                               uint32_t postprocess_mask,
+                               void *userData, uint32_t numBuffers)
+{
+    m_camHandle = cam_handle;
+    m_handle = channel_handle;
+    m_camOps = cam_ops;
+    m_bIsActive = false;
+
+    m_numStreams = 0;
+    memset(mStreams, 0, sizeof(mStreams));
+    mUserData = userData;
+
+    mStreamInfoBuf = NULL;
+    mChannelCB = cb_routine;
+    mPaddingInfo = paddingInfo;
+
+    mPostProcMask = postprocess_mask;
+
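+    /* persist.camera.yuv.dump enables on-device YUV frame dumps at runtime
+     * (consumed by dumpYUV()); it defaults to disabled. */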
+    char prop[PROPERTY_VALUE_MAX];
+    property_get("persist.camera.yuv.dump", prop, "0");
+    mYUVDump = (uint8_t) atoi(prop);
+    mIsType = IS_TYPE_NONE;
+    mNumBuffers = numBuffers;
+    mPerFrameMapUnmapEnable = true;
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCamera3Channel
+ *
+ * DESCRIPTION: destructor of QCamera3Channel
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCamera3Channel::~QCamera3Channel()
+{
+    if (m_bIsActive)
+        stop();
+
+    for (uint32_t i = 0; i < m_numStreams; i++) {
+        if (mStreams[i] != NULL) {
+            delete mStreams[i];
+            mStreams[i] = 0;
+        }
+    }
+    m_numStreams = 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : addStream
+ *
+ * DESCRIPTION: add a stream into channel
+ *
+ * PARAMETERS :
+ *   @streamType     : stream type
+ *   @streamFormat   : stream format
+ *   @streamDim      : stream dimension
+ *   @streamRotation : rotation of the stream
+ *   @minStreamBufNum : minimal buffer count for particular stream type
+ *   @postprocessMask : post-process feature mask
+ *   @isType          : type of image stabilization required on the stream
+ *   @batchSize       : number of image buffers in a batch
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Channel::addStream(cam_stream_type_t streamType,
+                                  cam_format_t streamFormat,
+                                  cam_dimension_t streamDim,
+                                  cam_rotation_t streamRotation,
+                                  uint8_t minStreamBufNum,
+                                  uint32_t postprocessMask,
+                                  cam_is_type_t isType,
+                                  uint32_t batchSize)
+{
+    int32_t rc = NO_ERROR;
+
+    if (m_numStreams >= 1) {
+        ALOGE("%s: Only one stream per channel supported in v3 Hal", __func__);
+        return BAD_VALUE;
+    }
+
+    if (m_numStreams >= MAX_STREAM_NUM_IN_BUNDLE) {
+        ALOGE("%s: stream number (%d) exceeds max limit (%d)",
+              __func__, m_numStreams, MAX_STREAM_NUM_IN_BUNDLE);
+        return BAD_VALUE;
+    }
+    QCamera3Stream *pStream = new QCamera3Stream(m_camHandle,
+                                               m_handle,
+                                               m_camOps,
+                                               mPaddingInfo,
+                                               this);
+    if (pStream == NULL) {
+        ALOGE("%s: No mem for Stream", __func__);
+        return NO_MEMORY;
+    }
+    CDBG("%s: batch size is %d", __func__, batchSize);
+
+    rc = pStream->init(streamType, streamFormat, streamDim, streamRotation,
+            NULL, minStreamBufNum, postprocessMask, isType, batchSize,
+            streamCbRoutine, this);
+    if (rc == 0) {
+        mStreams[m_numStreams] = pStream;
+        m_numStreams++;
+    } else {
+        delete pStream;
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : start
+ *
+ * DESCRIPTION: start the channel, which will start all streams belonging to this channel
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Channel::start()
+{
+    ATRACE_CALL();
+    int32_t rc = NO_ERROR;
+
+    if (m_numStreams > 1) {
+        ALOGE("%s: bundle not supported", __func__);
+    } else if (m_numStreams == 0) {
+        return NO_INIT;
+    }
+
+    if(m_bIsActive) {
+        ALOGD("%s: Attempt to start active channel", __func__);
+        return rc;
+    }
+
+    for (uint32_t i = 0; i < m_numStreams; i++) {
+        if (mStreams[i] != NULL) {
+            mStreams[i]->start();
+        }
+    }
+
+    m_bIsActive = true;
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : stop
+ *
+ * DESCRIPTION: stop the channel, which will stop all streams belonging to this channel
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Channel::stop()
+{
+    ATRACE_CALL();
+    int32_t rc = NO_ERROR;
+    if(!m_bIsActive) {
+        ALOGE("%s: Attempt to stop inactive channel", __func__);
+        return rc;
+    }
+
+    for (uint32_t i = 0; i < m_numStreams; i++) {
+        if (mStreams[i] != NULL) {
+            mStreams[i]->stop();
+        }
+    }
+
+    m_bIsActive = false;
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setBatchSize
+ *
+ * DESCRIPTION: Set batch size for the channel. This is a dummy implementation
+ *              for the base class
+ *
+ * PARAMETERS :
+ *   @batchSize  : Number of image buffers in a batch
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success always
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Channel::setBatchSize(uint32_t batchSize)
+{
+    CDBG("%s: Dummy method. batchSize: %d unused ", __func__, batchSize);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : queueBatchBuf
+ *
+ * DESCRIPTION: This is a dummy implementation for the base class
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success always
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Channel::queueBatchBuf()
+{
+    CDBG("%s: Dummy method. Unused ", __func__);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setPerFrameMapUnmap
+ *
+ * DESCRIPTION: Sets internal enable flag
+ *
+ * PARAMETERS :
+ *  @enable : Bool value for the enable flag
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success always
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Channel::setPerFrameMapUnmap(bool enable)
+{
+    mPerFrameMapUnmapEnable = enable;
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : bufDone
+ *
+ * DESCRIPTION: return a stream buf back to kernel
+ *
+ * PARAMETERS :
+ *   @recvd_frame  : stream buf frame to be returned
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Channel::bufDone(mm_camera_super_buf_t *recvd_frame)
+{
+    int32_t rc = NO_ERROR;
+    for (uint32_t i = 0; i < recvd_frame->num_bufs; i++) {
+         if (recvd_frame->bufs[i] != NULL) {
+             for (uint32_t j = 0; j < m_numStreams; j++) {
+                 if (mStreams[j] != NULL &&
+                     mStreams[j]->getMyHandle() == recvd_frame->bufs[i]->stream_id) {
+                     rc = mStreams[j]->bufDone(recvd_frame->bufs[i]->buf_idx);
+                     break; // break loop j
+                 }
+             }
+         }
+    }
+
+    return rc;
+}
+
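+/*===========================================================================
+ * FUNCTION   : setBundleInfo
+ *
+ * DESCRIPTION: pass the bundle configuration down to the channel's stream
+ *
+ * PARAMETERS :
+ *   @bundleInfo : bundle configuration to set on the stream
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/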
+int32_t QCamera3Channel::setBundleInfo(const cam_bundle_config_t &bundleInfo)
+{
+    int32_t rc = NO_ERROR;
+    cam_stream_parm_buffer_t param;
+    memset(&param, 0, sizeof(cam_stream_parm_buffer_t));
+    param.type = CAM_STREAM_PARAM_TYPE_SET_BUNDLE_INFO;
+    param.bundleInfo = bundleInfo;
+
+    if (mStreams[0] != NULL) {
+        rc = mStreams[0]->setParameter(param);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: stream setParameter for set bundle failed", __func__);
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : getStreamTypeMask
+ *
+ * DESCRIPTION: Get bit mask of all stream types in this channel
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : Bit mask of all stream types in this channel
+ *==========================================================================*/
+uint32_t QCamera3Channel::getStreamTypeMask()
+{
+    uint32_t mask = 0;
+    for (uint32_t i = 0; i < m_numStreams; i++) {
+       mask |= (1U << mStreams[i]->getMyType());
+    }
+    return mask;
+}
+
+/*===========================================================================
+ * FUNCTION   : getStreamID
+ *
+ * DESCRIPTION: Get StreamID of requested stream type
+ *
+ * PARAMETERS : streamMask
+ *
+ * RETURN     : Stream ID
+ *==========================================================================*/
+uint32_t QCamera3Channel::getStreamID(uint32_t streamMask)
+{
+    uint32_t streamID = 0;
+    for (uint32_t i = 0; i < m_numStreams; i++) {
+        if (streamMask == (uint32_t )(0x1 << mStreams[i]->getMyType())) {
+            streamID = mStreams[i]->getMyServerID();
+            break;
+        }
+    }
+    return streamID;
+}
+
+/*===========================================================================
+ * FUNCTION   : getStreamByHandle
+ *
+ * DESCRIPTION: return stream object by stream handle
+ *
+ * PARAMETERS :
+ *   @streamHandle : stream handle
+ *
+ * RETURN     : stream object. NULL if not found
+ *==========================================================================*/
+QCamera3Stream *QCamera3Channel::getStreamByHandle(uint32_t streamHandle)
+{
+    for (uint32_t i = 0; i < m_numStreams; i++) {
+        if (mStreams[i] != NULL && mStreams[i]->getMyHandle() == streamHandle) {
+            return mStreams[i];
+        }
+    }
+    return NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : getStreamByIndex
+ *
+ * DESCRIPTION: return stream object by index
+ *
+ * PARAMETERS :
+ *   @index : index of the stream within this channel
+ *
+ * RETURN     : stream object. NULL if not found
+ *==========================================================================*/
+QCamera3Stream *QCamera3Channel::getStreamByIndex(uint32_t index)
+{
+    if (index < m_numStreams) {
+        return mStreams[index];
+    }
+    return NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : streamCbRoutine
+ *
+ * DESCRIPTION: callback routine for stream
+ *
+ * PARAMETERS :
+ *   @super_frame : the super frame with the filled buffer
+ *   @stream      : stream on which the buffer was received
+ *   @userdata    : opaque pointer to the owning QCamera3Channel
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera3Channel::streamCbRoutine(mm_camera_super_buf_t *super_frame,
+                QCamera3Stream *stream, void *userdata)
+{
+    QCamera3Channel *channel = (QCamera3Channel *)userdata;
+    if (channel == NULL) {
+        ALOGE("%s: invalid channel pointer", __func__);
+        return;
+    }
+    channel->streamCbRoutine(super_frame, stream);
+}
+
+/*===========================================================================
+ * FUNCTION   : dumpYUV
+ *
+ * DESCRIPTION: function to dump the YUV data from ISP/pproc
+ *
+ * PARAMETERS :
+ *   @frame   : frame to be dumped
+ *   @dim     : dimension of the stream
+ *   @offset  : offset of the data
+ *   @name    : 1 if it is ISP output/pproc input, 2 if it is pproc output
+ *
+ * RETURN  :
+ *==========================================================================*/
+void QCamera3Channel::dumpYUV(mm_camera_buf_def_t *frame, cam_dimension_t dim,
+        cam_frame_len_offset_t offset, uint8_t name)
+{
+    char buf[FILENAME_MAX];
+    memset(buf, 0, sizeof(buf));
+    static int counter = 0;
+    /* Note that the image dimensions are the unrotated stream dimensions.
+     * If the image was rotated during reprocess, swap the dimensions when
+     * viewing the dumped file.
+     */
+    snprintf(buf, sizeof(buf), QCAMERA_DUMP_FRM_LOCATION"%d_%d_%d_%dx%d.yuv",
+            name, counter, frame->frame_idx, dim.width, dim.height);
+    counter++;
+    int file_fd = open(buf, O_RDWR| O_CREAT, 0644);
+    if (file_fd >= 0) {
+        ssize_t written_len = write(file_fd, frame->buffer, offset.frame_len);
+        ALOGE("%s: written number of bytes %d", __func__, written_len);
+        close(file_fd);
+    } else {
+        ALOGE("%s: failed to open file to dump image", __func__);
+    }
+}
+
+/* QCamera3ProcessingChannel methods */
+
+/*===========================================================================
+ * FUNCTION   : QCamera3ProcessingChannel
+ *
+ * DESCRIPTION: constructor of QCamera3ProcessingChannel
+ *
+ * PARAMETERS :
+ *   @cam_handle : camera handle
+ *   @cam_ops    : ptr to camera ops table
+ *   @cb_routine : callback routine to frame aggregator
+ *   @paddingInfo: stream padding info
+ *   @userData   : HWI handle
+ *   @stream     : camera3_stream_t structure
+ *   @stream_type: Channel stream type
+ *   @postprocess_mask: the postprocess mask for streams of this channel
+ *   @metadataChannel: handle to the metadataChannel
+ *   @numBuffers : number of max dequeued buffers
+ * RETURN     : none
+ *==========================================================================*/
+QCamera3ProcessingChannel::QCamera3ProcessingChannel(uint32_t cam_handle,
+        uint32_t channel_handle,
+        mm_camera_ops_t *cam_ops,
+        channel_cb_routine cb_routine,
+        cam_padding_info_t *paddingInfo,
+        void *userData,
+        camera3_stream_t *stream,
+        cam_stream_type_t stream_type,
+        uint32_t postprocess_mask,
+        QCamera3Channel *metadataChannel,
+        uint32_t numBuffers) :
+            QCamera3Channel(cam_handle, channel_handle, cam_ops, cb_routine,
+                    paddingInfo, postprocess_mask, userData, numBuffers),
+            m_postprocessor(this),
+            mMemory(numBuffers),
+            mCamera3Stream(stream),
+            mNumBufs(CAM_MAX_NUM_BUFS_PER_STREAM),
+            mStreamType(stream_type),
+            mPostProcStarted(false),
+            mInputBufferConfig(false),
+            m_pMetaChannel(metadataChannel),
+            mMetaFrame(NULL),
+            mOfflineMemory(0),
+            mOfflineMetaMemory(numBuffers + (MAX_REPROCESS_PIPELINE_STAGES - 1),
+                    false)
+{
+    int32_t rc = m_postprocessor.init(&mMemory, mPostProcMask);
+    if (rc != 0) {
+        ALOGE("Init Postprocessor failed");
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCamera3ProcessingChannel
+ *
+ * DESCRIPTION: destructor of QCamera3ProcessingChannel
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCamera3ProcessingChannel::~QCamera3ProcessingChannel()
+{
+    stop();
+
+    int32_t rc = m_postprocessor.stop();
+    if (rc != NO_ERROR) {
+        ALOGE("%s: Postprocessor stop failed", __func__);
+    }
+
+    rc = m_postprocessor.deinit();
+    if (rc != 0) {
+        ALOGE("De-init Postprocessor failed");
+    }
+
+    if (0 < mOfflineMetaMemory.getCnt()) {
+        mOfflineMetaMemory.deallocate();
+    }
+    if (0 < mOfflineMemory.getCnt()) {
+        mOfflineMemory.unregisterBuffers();
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : streamCbRoutine
+ *
+ * DESCRIPTION: callback routine for a filled stream buffer; issues the
+ *              framework buffer callback for the corresponding request
+ *
+ * PARAMETERS :
+ * @super_frame : the super frame with filled buffer
+ * @stream      : stream on which the buffer was requested and filled
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera3ProcessingChannel::streamCbRoutine(mm_camera_super_buf_t *super_frame,
+        QCamera3Stream *stream)
+{
+     ATRACE_CALL();
+    //FIXME Q Buf back in case of error?
+    uint8_t frameIndex;
+    buffer_handle_t *resultBuffer;
+    int32_t resultFrameNumber;
+    camera3_stream_buffer_t result;
+
+    if (checkStreamCbErrors(super_frame, stream) != NO_ERROR) {
+        ALOGE("%s: Error with the stream callback", __func__);
+        return;
+    }
+
+    frameIndex = (uint8_t)super_frame->bufs[0]->buf_idx;
+    if(frameIndex >= mNumBufs) {
+         ALOGE("%s: Error, Invalid index for buffer",__func__);
+         stream->bufDone(frameIndex);
+         return;
+    }
+
+    // Use the data below to issue the framework callback
+    resultBuffer = (buffer_handle_t *)mMemory.getBufferHandle(frameIndex);
+    resultFrameNumber = mMemory.getFrameNumber(frameIndex);
+
+    result.stream = mCamera3Stream;
+    result.buffer = resultBuffer;
+    if (IS_BUFFER_ERROR(super_frame->bufs[0]->flags)) {
+        result.status = CAMERA3_BUFFER_STATUS_ERROR;
+        ALOGW("%s: %d CAMERA3_BUFFER_STATUS_ERROR for stream_type: %d",
+            __func__, __LINE__, mStreams[0]->getMyType());
+    } else {
+        result.status = CAMERA3_BUFFER_STATUS_OK;
+    }
+    result.acquire_fence = -1;
+    result.release_fence = -1;
+    if(mPerFrameMapUnmapEnable) {
+        int32_t rc = stream->bufRelease(frameIndex);
+        if (NO_ERROR != rc) {
+            ALOGE("%s: Error %d releasing stream buffer %d",
+                    __func__, rc, frameIndex);
+        }
+
+        rc = mMemory.unregisterBuffer(frameIndex);
+        if (NO_ERROR != rc) {
+            ALOGE("%s: Error %d unregistering stream buffer %d",
+                    __func__, rc, frameIndex);
+        }
+    }
+
+    if (0 <= resultFrameNumber) {
+        if (mChannelCB) {
+            mChannelCB(NULL, &result, (uint32_t)resultFrameNumber, false, mUserData);
+        }
+    } else {
+        ALOGE("%s: Bad frame number", __func__);
+    }
+    free(super_frame);
+    return;
+}
+
+/*===========================================================================
+ * FUNCTION   : putStreamBufs
+ *
+ * DESCRIPTION: release the buffers allocated to the stream
+ *
+ * PARAMETERS : NONE
+ *
+ * RETURN     : NONE
+ *==========================================================================*/
+void QCamera3YUVChannel::putStreamBufs()
+{
+    QCamera3ProcessingChannel::putStreamBufs();
+
+    // Free allocated heap buffer.
+    mMemory.deallocate();
+    // Clear free heap buffer list.
+    mFreeHeapBufferList.clear();
+    // Clear offlinePpInfoList
+    mOfflinePpInfoList.clear();
+}
+
+/*===========================================================================
+ * FUNCTION   : request
+ *
+ * DESCRIPTION: handle the request - either with an input buffer or a direct
+ *              output request
+ *
+ * PARAMETERS :
+ * @buffer          : pointer to the output buffer
+ * @frameNumber     : frame number of the request
+ * @pInputBuffer    : pointer to input buffer if an input request
+ * @metadata        : parameters associated with the request
+ *
+ * RETURN     : 0 on a success start of capture
+ *              -EINVAL on invalid input
+ *              -ENODEV on serious error
+ *==========================================================================*/
+int32_t QCamera3ProcessingChannel::request(buffer_handle_t *buffer,
+        uint32_t frameNumber,
+        camera3_stream_buffer_t* pInputBuffer,
+        metadata_buffer_t* metadata)
+{
+    int32_t rc = NO_ERROR;
+    int index;
+
+    if (NULL == buffer || NULL == metadata) {
+        ALOGE("%s: Invalid buffer/metadata in channel request", __func__);
+        return BAD_VALUE;
+    }
+
+    if (pInputBuffer) {
+        //need to send to reprocessing
+        CDBG("%s: Got a request with input buffer, output streamType = %d", __func__, mStreamType);
+        reprocess_config_t reproc_cfg;
+        cam_dimension_t dim;
+        memset(&reproc_cfg, 0, sizeof(reprocess_config_t));
+        memset(&dim, 0, sizeof(dim));
+        setReprocConfig(reproc_cfg, pInputBuffer, metadata, mStreamFormat, dim);
+        startPostProc(reproc_cfg);
+
+        qcamera_fwk_input_pp_data_t *src_frame = NULL;
+        src_frame = (qcamera_fwk_input_pp_data_t *)calloc(1,
+                sizeof(qcamera_fwk_input_pp_data_t));
+        if (src_frame == NULL) {
+            ALOGE("%s: No memory for src frame", __func__);
+            return NO_MEMORY;
+        }
+        rc = setFwkInputPPData(src_frame, pInputBuffer, &reproc_cfg, metadata, buffer, frameNumber);
+        if (NO_ERROR != rc) {
+            ALOGE("%s: Error %d while setting framework input PP data", __func__, rc);
+            free(src_frame);
+            return rc;
+        }
+        CDBG_HIGH("%s: Post-process started", __func__);
+        CDBG_HIGH("%s: Issue call to reprocess", __func__);
+        m_postprocessor.processData(src_frame);
+    } else {
+        //need to fill output buffer with new data and return
+        if(!m_bIsActive) {
+            rc = registerBuffer(buffer, mIsType);
+            if (NO_ERROR != rc) {
+                ALOGE("%s: On-the-fly buffer registration failed %d",
+                        __func__, rc);
+                return rc;
+            }
+
+            rc = start();
+            if (NO_ERROR != rc)
+                return rc;
+        } else {
+            CDBG("%s: Request on an existing stream",__func__);
+        }
+
+        index = mMemory.getMatchBufIndex((void*)buffer);
+        if(index < 0) {
+            rc = registerBuffer(buffer, mIsType);
+            if (NO_ERROR != rc) {
+                ALOGE("%s: On-the-fly buffer registration failed %d",
+                        __func__, rc);
+                return rc;
+            }
+
+            index = mMemory.getMatchBufIndex((void*)buffer);
+            if (index < 0) {
+                ALOGE("%s: Could not find object among registered buffers",
+                        __func__);
+                return DEAD_OBJECT;
+            }
+        }
+        rc = mStreams[0]->bufDone(index);
+        if(rc != NO_ERROR) {
+            ALOGE("%s: Failed to Q new buffer to stream",__func__);
+            return rc;
+        }
+        rc = mMemory.markFrameNumber(index, frameNumber);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : initialize
+ *
+ * DESCRIPTION: allocate the offline metadata buffers used for input
+ *              reprocess requests
+ *
+ * PARAMETERS : isType : type of image stabilization on the buffer
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ProcessingChannel::initialize(cam_is_type_t isType)
+{
+    int32_t rc = NO_ERROR;
+    rc = mOfflineMetaMemory.allocateAll(sizeof(metadata_buffer_t));
+    if (rc == NO_ERROR) {
+        Mutex::Autolock lock(mFreeOfflineMetaBuffersLock);
+        mFreeOfflineMetaBuffersList.clear();
+        for (uint32_t i = 0; i < mNumBuffers + (MAX_REPROCESS_PIPELINE_STAGES - 1);
+                i++) {
+            mFreeOfflineMetaBuffersList.push_back(i);
+        }
+    } else {
+        ALOGE("%s: Could not allocate offline meta buffers for input reprocess",
+                __func__);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : registerBuffer
+ *
+ * DESCRIPTION: register streaming buffer to the channel object
+ *
+ * PARAMETERS :
+ *   @buffer     : buffer to be registered
+ *   @isType     : image stabilization type on the stream
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ProcessingChannel::registerBuffer(buffer_handle_t *buffer,
+        cam_is_type_t isType)
+{
+    ATRACE_CALL();
+    int rc = 0;
+    mIsType = isType;
+    cam_stream_type_t streamType;
+
+    if ((uint32_t)mMemory.getCnt() > (mNumBufs - 1)) {
+        ALOGE("%s: Trying to register more buffers than initially requested",
+                __func__);
+        return BAD_VALUE;
+    }
+
+    if (0 == m_numStreams) {
+        rc = initialize(mIsType);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: Couldn't initialize camera stream %d",
+                    __func__, rc);
+            return rc;
+        }
+    }
+
+    streamType = mStreams[0]->getMyType();
+    rc = mMemory.registerBuffer(buffer, streamType);
+    if (ALREADY_EXISTS == rc) {
+        return NO_ERROR;
+    } else if (NO_ERROR != rc) {
+        ALOGE("%s: Buffer %p couldn't be registered %d", __func__, buffer, rc);
+        return rc;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setFwkInputPPData
+ *
+ * DESCRIPTION: fill out the framework src frame information for reprocessing
+ *
+ * PARAMETERS :
+ *   @src_frame         : input pp data to be filled out
+ *   @pInputBuffer      : input buffer for reprocessing
+ *   @reproc_cfg        : pointer to the reprocess config
+ *   @metadata          : pointer to the metadata buffer
+ *   @output_buffer     : output buffer for reprocessing; could be NULL if not
+ *                        framework allocated
+ *   @frameNumber       : frame number of the request
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ProcessingChannel::setFwkInputPPData(qcamera_fwk_input_pp_data_t *src_frame,
+        camera3_stream_buffer_t *pInputBuffer, reprocess_config_t *reproc_cfg,
+        metadata_buffer_t *metadata, buffer_handle_t *output_buffer,
+        uint32_t frameNumber)
+{
+    int32_t rc = NO_ERROR;
+    int input_index = mOfflineMemory.getMatchBufIndex((void*)pInputBuffer->buffer);
+    if(input_index < 0) {
+        rc = mOfflineMemory.registerBuffer(pInputBuffer->buffer, mStreamType);
+        if (NO_ERROR != rc) {
+            ALOGE("%s: On-the-fly input buffer registration failed %d",
+                    __func__, rc);
+            return rc;
+        }
+        input_index = mOfflineMemory.getMatchBufIndex((void*)pInputBuffer->buffer);
+        if (input_index < 0) {
+            ALOGE("%s: Could not find object among registered buffers",__func__);
+            return DEAD_OBJECT;
+        }
+    }
+    mOfflineMemory.markFrameNumber(input_index, frameNumber);
+
+    src_frame->src_frame = *pInputBuffer;
+    rc = mOfflineMemory.getBufDef(reproc_cfg->input_stream_plane_info.plane_info,
+            src_frame->input_buffer, input_index);
+    if (rc != 0) {
+        return rc;
+    }
+    if (mYUVDump) {
+       dumpYUV(&src_frame->input_buffer, reproc_cfg->input_stream_dim,
+               reproc_cfg->input_stream_plane_info.plane_info, 1);
+    }
+
+    cam_dimension_t dim = {sizeof(metadata_buffer_t), 1};
+    cam_stream_buf_plane_info_t meta_planes;
+    rc = mm_stream_calc_offset_metadata(&dim, mPaddingInfo, &meta_planes);
+    if (rc != 0) {
+        ALOGE("%s: Metadata stream plane info calculation failed!", __func__);
+        return rc;
+    }
+    uint32_t metaBufIdx;
+    {
+        Mutex::Autolock lock(mFreeOfflineMetaBuffersLock);
+        if (mFreeOfflineMetaBuffersList.empty()) {
+            ALOGE("%s: mFreeOfflineMetaBuffersList is null. Fatal", __func__);
+            return BAD_VALUE;
+        }
+
+        metaBufIdx = *(mFreeOfflineMetaBuffersList.begin());
+        mFreeOfflineMetaBuffersList.erase(mFreeOfflineMetaBuffersList.begin());
+        CDBG("%s: erasing %d, mFreeOfflineMetaBuffersList.size %d", __func__, metaBufIdx,
+                mFreeOfflineMetaBuffersList.size());
+    }
+
+    mOfflineMetaMemory.markFrameNumber(metaBufIdx, frameNumber);
+
+    mm_camera_buf_def_t meta_buf;
+    cam_frame_len_offset_t offset = meta_planes.plane_info;
+    rc = mOfflineMetaMemory.getBufDef(offset, meta_buf, metaBufIdx);
+    if (NO_ERROR != rc) {
+        return rc;
+    }
+    memcpy(meta_buf.buffer, metadata, sizeof(metadata_buffer_t));
+    src_frame->metadata_buffer = meta_buf;
+    src_frame->reproc_config = *reproc_cfg;
+    src_frame->output_buffer = output_buffer;
+    src_frame->frameNumber = frameNumber;
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : checkStreamCbErrors
+ *
+ * DESCRIPTION: check the stream callback for errors
+ *
+ * PARAMETERS :
+ *   @super_frame : the super frame with filled buffer
+ *   @stream      : stream on which the buffer was requested and filled
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ProcessingChannel::checkStreamCbErrors(mm_camera_super_buf_t *super_frame,
+        QCamera3Stream *stream)
+{
+    if (NULL == stream) {
+        ALOGE("%s: Invalid stream", __func__);
+        return BAD_VALUE;
+    }
+
+    if(NULL == super_frame) {
+         ALOGE("%s: Invalid Super buffer",__func__);
+         return BAD_VALUE;
+    }
+
+    if(super_frame->num_bufs != 1) {
+         ALOGE("%s: Multiple streams are not supported",__func__);
+         return BAD_VALUE;
+    }
+    if(NULL == super_frame->bufs[0]) {
+         ALOGE("%s: Error, Super buffer frame does not contain valid buffer",
+                  __func__);
+         return BAD_VALUE;
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : getStreamSize
+ *
+ * DESCRIPTION: get the size from the camera3_stream_t for the channel
+ *
+ * PARAMETERS :
+ *   @dim     : Return the size of the stream
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ProcessingChannel::getStreamSize(cam_dimension_t &dim)
+{
+    if (mCamera3Stream) {
+        dim.width = mCamera3Stream->width;
+        dim.height = mCamera3Stream->height;
+        return NO_ERROR;
+    } else {
+        return BAD_VALUE;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getStreamBufs
+ *
+ * DESCRIPTION: get the buffers allocated to the stream
+ *
+ * PARAMETERS :
+ * @len       : buffer length
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+QCamera3StreamMem* QCamera3ProcessingChannel::getStreamBufs(uint32_t /*len*/)
+{
+    return &mMemory;
+}
+
+
+/*===========================================================================
+ * FUNCTION   : putStreamBufs
+ *
+ * DESCRIPTION: release the buffers allocated to the stream
+ *
+ * PARAMETERS : NONE
+ *
+ * RETURN     : NONE
+ *==========================================================================*/
+void QCamera3ProcessingChannel::putStreamBufs()
+{
+    mMemory.unregisterBuffers();
+
+    /* Reclaim all the offline metabuffers and push them to free list */
+    {
+        Mutex::Autolock lock(mFreeOfflineMetaBuffersLock);
+        mFreeOfflineMetaBuffersList.clear();
+        for (uint32_t i = 0; i < mOfflineMetaMemory.getCnt(); i++) {
+            mFreeOfflineMetaBuffersList.push_back(i);
+        }
+    }
+}
+
+
+/*===========================================================================
+ * FUNCTION   : stop
+ *
+ * DESCRIPTION: stop processing channel, which will stop all streams within,
+ *              including the reprocessing channel in postprocessor.
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ProcessingChannel::stop()
+{
+    int32_t rc = NO_ERROR;
+    if(!m_bIsActive) {
+        ALOGE("%s: Attempt to stop inactive channel",__func__);
+        return rc;
+    }
+
+    m_postprocessor.stop();
+    mPostProcStarted = false;
+    rc |= QCamera3Channel::stop();
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : startPostProc
+ *
+ * DESCRIPTION: figure out if the postprocessor needs to be restarted and if yes
+ *              start it
+ *
+ * PARAMETERS :
+ * @config : reprocessing configuration for the postprocessor
+ *
+ * RETURN     : NONE
+ *==========================================================================*/
+void QCamera3ProcessingChannel::startPostProc(const reprocess_config_t &config)
+{
+    if(!mPostProcStarted) {
+        m_postprocessor.start(config);
+        mPostProcStarted = true;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : queueReprocMetadata
+ *
+ * DESCRIPTION: queue the reprocess metadata to the postprocessor
+ *
+ * PARAMETERS : metadata : the metadata corresponding to the pp frame
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ProcessingChannel::queueReprocMetadata(mm_camera_super_buf_t *metadata)
+{
+    return m_postprocessor.processPPMetadata(metadata);
+}
+
+/*===========================================================================
+ * FUNCTION : metadataBufDone
+ *
+ * DESCRIPTION: Buffer done method for a metadata buffer
+ *
+ * PARAMETERS :
+ * @recvd_frame : received metadata frame
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ProcessingChannel::metadataBufDone(mm_camera_super_buf_t *recvd_frame)
+{
+    int32_t rc = NO_ERROR;
+    if ((NULL == m_pMetaChannel) || (NULL == recvd_frame)) {
+        ALOGE("%s: Metadata channel or metadata buffer invalid", __func__);
+        return BAD_VALUE;
+    }
+
+    rc = ((QCamera3MetadataChannel*)m_pMetaChannel)->bufDone(recvd_frame);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION : translateStreamTypeAndFormat
+ *
+ * DESCRIPTION: translates the framework stream format into HAL stream type
+ *              and format
+ *
+ * PARAMETERS :
+ * @streamType   : translated stream type
+ * @streamFormat : translated stream format
+ * @stream       : fwk stream
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ProcessingChannel::translateStreamTypeAndFormat(camera3_stream_t *stream,
+        cam_stream_type_t &streamType, cam_format_t &streamFormat)
+{
+    switch (stream->format) {
+        case HAL_PIXEL_FORMAT_YCbCr_420_888:
+            if(stream->stream_type == CAMERA3_STREAM_INPUT){
+                streamType = CAM_STREAM_TYPE_SNAPSHOT;
+                streamFormat = SNAPSHOT_FORMAT;
+            } else {
+                streamType = CAM_STREAM_TYPE_CALLBACK;
+                streamFormat = CALLBACK_FORMAT;
+            }
+            break;
+        case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
+            if (stream->usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) {
+                streamType = CAM_STREAM_TYPE_VIDEO;
+                streamFormat = VIDEO_FORMAT;
+            } else if(stream->stream_type == CAMERA3_STREAM_INPUT ||
+                    stream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL ||
+                    IS_USAGE_ZSL(stream->usage)){
+                streamType = CAM_STREAM_TYPE_SNAPSHOT;
+                streamFormat = SNAPSHOT_FORMAT;
+            } else {
+                streamType = CAM_STREAM_TYPE_PREVIEW;
+                streamFormat = PREVIEW_FORMAT;
+            }
+            break;
+        case HAL_PIXEL_FORMAT_RAW_OPAQUE:
+        case HAL_PIXEL_FORMAT_RAW16:
+        case HAL_PIXEL_FORMAT_RAW10:
+            streamType = CAM_STREAM_TYPE_RAW;
+            streamFormat = CAM_FORMAT_BAYER_MIPI_RAW_10BPP_GBRG;
+            break;
+        default:
+            return -EINVAL;
+    }
+    CDBG("%s: fwk_format = %d, streamType = %d, streamFormat = %d", __func__,
+            stream->format, streamType, streamFormat);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION : setReprocConfig
+ *
+ * DESCRIPTION: sets the reprocessing parameters for the input buffer
+ *
+ * PARAMETERS :
+ * @reproc_cfg : the configuration to be set
+ * @pInputBuffer : pointer to the input buffer
+ * @metadata : pointer to the reprocessing metadata buffer
+ * @streamFormat : format of the input stream
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ProcessingChannel::setReprocConfig(reprocess_config_t &reproc_cfg,
+        camera3_stream_buffer_t *pInputBuffer,
+        metadata_buffer_t *metadata,
+        cam_format_t streamFormat, cam_dimension_t dim)
+{
+    int32_t rc = 0;
+    reproc_cfg.padding = mPaddingInfo;
+    //to ensure a big enough buffer size set the height and width
+    //padding to max(height padding, width padding)
+    if (reproc_cfg.padding->height_padding > reproc_cfg.padding->width_padding) {
+       reproc_cfg.padding->width_padding = reproc_cfg.padding->height_padding;
+    } else {
+       reproc_cfg.padding->height_padding = reproc_cfg.padding->width_padding;
+    }
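+    // Note: reproc_cfg.padding aliases the structure referenced by
+    // mPaddingInfo, so the equalization above also updates the channel's
+    // shared padding info in place.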
+    if (NULL != pInputBuffer) {
+        reproc_cfg.input_stream_dim.width = (int32_t)pInputBuffer->stream->width;
+        reproc_cfg.input_stream_dim.height = (int32_t)pInputBuffer->stream->height;
+    } else {
+        reproc_cfg.input_stream_dim.width = (int32_t)dim.width;
+        reproc_cfg.input_stream_dim.height = (int32_t)dim.height;
+    }
+    reproc_cfg.src_channel = this;
+    reproc_cfg.output_stream_dim.width = mCamera3Stream->width;
+    reproc_cfg.output_stream_dim.height = mCamera3Stream->height;
+    reproc_cfg.reprocess_type = getReprocessType();
+
+    //offset calculation
+    if (NULL != pInputBuffer) {
+        rc = translateStreamTypeAndFormat(pInputBuffer->stream,
+                reproc_cfg.stream_type, reproc_cfg.stream_format);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: Stream format %d is not supported", __func__,
+                    pInputBuffer->stream->format);
+            return rc;
+        }
+    } else {
+        reproc_cfg.stream_type = mStreamType;
+        reproc_cfg.stream_format = streamFormat;
+    }
+
+    switch (reproc_cfg.stream_type) {
+        case CAM_STREAM_TYPE_PREVIEW:
+            rc = mm_stream_calc_offset_preview(streamFormat,
+                    &reproc_cfg.input_stream_dim,
+                    &reproc_cfg.input_stream_plane_info);
+            break;
+        case CAM_STREAM_TYPE_VIDEO:
+            rc = mm_stream_calc_offset_video(&reproc_cfg.input_stream_dim,
+                    &reproc_cfg.input_stream_plane_info);
+            break;
+        case CAM_STREAM_TYPE_RAW:
+            rc = mm_stream_calc_offset_raw(streamFormat, &reproc_cfg.input_stream_dim,
+                    reproc_cfg.padding, &reproc_cfg.input_stream_plane_info);
+            break;
+        case CAM_STREAM_TYPE_SNAPSHOT:
+        case CAM_STREAM_TYPE_CALLBACK:
+        default:
+            rc = mm_stream_calc_offset_snapshot(streamFormat, &reproc_cfg.input_stream_dim,
+                    reproc_cfg.padding, &reproc_cfg.input_stream_plane_info);
+            break;
+    }
+    if (rc != 0) {
+        ALOGE("%s: Stream %d plane info calculation failed!", __func__, mStreamType);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : reprocessCbRoutine
+ *
+ * DESCRIPTION: callback function for the reprocessed frame. This frame now
+ *              should be returned to the framework
+ *
+ * PARAMETERS :
+ * @resultBuffer      : buffer containing the reprocessed data
+ * @resultFrameNumber : frame number on which the buffer was requested
+ *
+ * RETURN     : NONE
+ *
+ *==========================================================================*/
+void QCamera3ProcessingChannel::reprocessCbRoutine(buffer_handle_t *resultBuffer,
+        uint32_t resultFrameNumber)
+{
+    ATRACE_CALL();
+    int rc = NO_ERROR;
+
+    rc = releaseOfflineMemory(resultFrameNumber);
+    if (NO_ERROR != rc) {
+        ALOGE("%s: Error releasing offline memory %d", __func__, rc);
+    }
+    /* Since reprocessing is done, send the callback to release the input buffer */
+    if (mChannelCB) {
+        mChannelCB(NULL, NULL, resultFrameNumber, true, mUserData);
+    }
+    issueChannelCb(resultBuffer, resultFrameNumber);
+
+    return;
+}
+
+/*===========================================================================
+ * FUNCTION   : issueChannelCb
+ *
+ * DESCRIPTION: function to set the result and issue channel callback
+ *
+ * PARAMETERS :
+ * @resultBuffer      : buffer containing the data
+ * @resultFrameNumber : frame number on which the buffer was requested
+ *
+ * RETURN     : NONE
+ *
+ *
+ *==========================================================================*/
+void QCamera3ProcessingChannel::issueChannelCb(buffer_handle_t *resultBuffer,
+        uint32_t resultFrameNumber)
+{
+    camera3_stream_buffer_t result;
+    //Use below data to issue framework callback
+    result.stream = mCamera3Stream;
+    result.buffer = resultBuffer;
+    result.status = CAMERA3_BUFFER_STATUS_OK;
+    result.acquire_fence = -1;
+    result.release_fence = -1;
+
+    if (mChannelCB) {
+        mChannelCB(NULL, &result, resultFrameNumber, false, mUserData);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseOfflineMemory
+ *
+ * DESCRIPTION: function to clean up the offline memory used for input reprocess
+ *
+ * PARAMETERS :
+ * @resultFrameNumber : frame number on which the buffer was requested
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ *
+ *==========================================================================*/
+int32_t QCamera3ProcessingChannel::releaseOfflineMemory(uint32_t resultFrameNumber)
+{
+    int32_t rc = NO_ERROR;
+    int32_t inputBufIndex =
+            mOfflineMemory.getGrallocBufferIndex(resultFrameNumber);
+    if (0 <= inputBufIndex) {
+        rc = mOfflineMemory.unregisterBuffer(inputBufIndex);
+    } else {
+        ALOGE("%s: Could not find offline input buffer, resultFrameNumber %d",
+                __func__, resultFrameNumber);
+    }
+    if (rc != NO_ERROR) {
+        ALOGE("%s: Failed to unregister offline input buffer", __func__);
+    }
+
+    int32_t metaBufIndex =
+            mOfflineMetaMemory.getHeapBufferIndex(resultFrameNumber);
+    if (0 <= metaBufIndex) {
+        Mutex::Autolock lock(mFreeOfflineMetaBuffersLock);
+        mFreeOfflineMetaBuffersList.push_back((uint32_t)metaBufIndex);
+    } else {
+        ALOGE("%s: Could not find offline meta buffer, resultFrameNumber %d",
+                __func__, resultFrameNumber);
+    }
+
+    return rc;
+}
+
+/* Regular Channel methods */
+
+/*===========================================================================
+ * FUNCTION   : QCamera3RegularChannel
+ *
+ * DESCRIPTION: constructor of QCamera3RegularChannel
+ *
+ * PARAMETERS :
+ *   @cam_handle : camera handle
+ *   @channel_handle : channel handle
+ *   @cam_ops    : ptr to camera ops table
+ *   @cb_routine : callback routine to frame aggregator
+ *   @paddingInfo: padding information for the stream
+ *   @userData   : cookie for the parent
+ *   @stream     : camera3_stream_t structure
+ *   @stream_type: Channel stream type
+ *   @postprocess_mask: feature mask for postprocessing
+ *   @metadataChannel : metadata channel for the session
+ *   @numBuffers : number of max dequeued buffers
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCamera3RegularChannel::QCamera3RegularChannel(uint32_t cam_handle,
+        uint32_t channel_handle,
+        mm_camera_ops_t *cam_ops,
+        channel_cb_routine cb_routine,
+        cam_padding_info_t *paddingInfo,
+        void *userData,
+        camera3_stream_t *stream,
+        cam_stream_type_t stream_type,
+        uint32_t postprocess_mask,
+        QCamera3Channel *metadataChannel,
+        uint32_t numBuffers) :
+            QCamera3ProcessingChannel(cam_handle, channel_handle, cam_ops,
+                    cb_routine, paddingInfo, userData, stream, stream_type,
+                    postprocess_mask, metadataChannel, numBuffers),
+            mRotation(ROTATE_0),
+            mBatchSize(0)
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCamera3RegularChannel
+ *
+ * DESCRIPTION: destructor of QCamera3RegularChannel
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCamera3RegularChannel::~QCamera3RegularChannel()
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : initialize
+ *
+ * DESCRIPTION: Initialize and add camera channel & stream
+ *
+ * PARAMETERS :
+ *    @isType : type of image stabilization required on this stream
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3RegularChannel::initialize(cam_is_type_t isType)
+{
+    ATRACE_CALL();
+    int32_t rc = NO_ERROR;
+    cam_dimension_t streamDim;
+
+    if (NULL == mCamera3Stream) {
+        ALOGE("%s: Camera stream uninitialized", __func__);
+        return NO_INIT;
+    }
+
+    if (1 <= m_numStreams) {
+        // Only one stream per channel supported in v3 Hal
+        return NO_ERROR;
+    }
+
+    mIsType  = isType;
+
+    rc = translateStreamTypeAndFormat(mCamera3Stream, mStreamType,
+            mStreamFormat);
+    if (rc != NO_ERROR) {
+        return -EINVAL;
+    }
+
+    if ((mStreamType == CAM_STREAM_TYPE_VIDEO) ||
+            (mStreamType == CAM_STREAM_TYPE_PREVIEW)) {
+        if ((mCamera3Stream->rotation != CAMERA3_STREAM_ROTATION_0) &&
+                ((mPostProcMask & CAM_QCOM_FEATURE_ROTATION) == 0)) {
+            ALOGE("%s: attempting rotation %d when rotation is disabled",
+                    __func__,
+                    mCamera3Stream->rotation);
+            return -EINVAL;
+        }
+
+        switch (mCamera3Stream->rotation) {
+            case CAMERA3_STREAM_ROTATION_0:
+                mRotation = ROTATE_0;
+                break;
+            case CAMERA3_STREAM_ROTATION_90: {
+                mRotation = ROTATE_90;
+                break;
+            }
+            case CAMERA3_STREAM_ROTATION_180:
+                mRotation = ROTATE_180;
+                break;
+            case CAMERA3_STREAM_ROTATION_270: {
+                mRotation = ROTATE_270;
+                break;
+            }
+            default:
+                ALOGE("%s: Unknown rotation: %d",
+                        __func__,
+                        mCamera3Stream->rotation);
+                return -EINVAL;
+        }
+    } else if (mCamera3Stream->rotation != CAMERA3_STREAM_ROTATION_0) {
+        ALOGE("%s: Rotation %d is not supported by stream type %d",
+                __func__,
+                mCamera3Stream->rotation,
+                mStreamType);
+        return -EINVAL;
+    }
+
+    streamDim.width = mCamera3Stream->width;
+    streamDim.height = mCamera3Stream->height;
+
+    CDBG("%s: batch size is %d", __func__, mBatchSize);
+    rc = QCamera3Channel::addStream(mStreamType,
+            mStreamFormat,
+            streamDim,
+            mRotation,
+            mNumBufs,
+            mPostProcMask,
+            mIsType,
+            mBatchSize);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setBatchSize
+ *
+ * DESCRIPTION: Set batch size for the channel.
+ *
+ * PARAMETERS :
+ *   @batchSize  : Number of image buffers in a batch
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success always
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3RegularChannel::setBatchSize(uint32_t batchSize)
+{
+    int32_t rc = NO_ERROR;
+
+    mBatchSize = batchSize;
+    CDBG("%s: Batch size set: %d", __func__, mBatchSize);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : getStreamTypeMask
+ *
+ * DESCRIPTION: Get bit mask of all stream types in this channel.
+ *              If stream is not initialized, then generate mask based on
+ *              local streamType
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : Bit mask of all stream types in this channel
+ *==========================================================================*/
+uint32_t QCamera3RegularChannel::getStreamTypeMask()
+{
+    if (mStreams[0]) {
+        return QCamera3Channel::getStreamTypeMask();
+    } else {
+        return (1U << mStreamType);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : queueBatchBuf
+ *
+ * DESCRIPTION: queue batch container to downstream
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success always
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3RegularChannel::queueBatchBuf()
+{
+    int32_t rc = NO_ERROR;
+
+    if (mStreams[0]) {
+        rc = mStreams[0]->queueBatchBuf();
+    }
+    if (rc != NO_ERROR) {
+        ALOGE("%s: stream->queueBatchContainer failed", __func__);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : request
+ *
+ * DESCRIPTION: process a request from camera service. Stream on if necessary.
+ *
+ * PARAMETERS :
+ *   @buffer  : buffer to be filled for this request
+ *
+ * RETURN     : 0 on a success start of capture
+ *              -EINVAL on invalid input
+ *              -ENODEV on serious error
+ *==========================================================================*/
+int32_t QCamera3RegularChannel::request(buffer_handle_t *buffer, uint32_t frameNumber)
+{
+    ATRACE_CALL();
+    //FIX ME: Return buffer back in case of failures below.
+
+    int32_t rc = NO_ERROR;
+    int index;
+
+    if (NULL == buffer) {
+        ALOGE("%s: Invalid buffer in channel request", __func__);
+        return BAD_VALUE;
+    }
+
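+    // Buffers are registered lazily: the first request on an inactive stream
+    // registers its buffer and starts the stream; any later buffer that is
+    // not yet known to mMemory is registered on the fly below.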
+    if(!m_bIsActive) {
+        rc = registerBuffer(buffer, mIsType);
+        if (NO_ERROR != rc) {
+            ALOGE("%s: On-the-fly buffer registration failed %d",
+                    __func__, rc);
+            return rc;
+        }
+
+        rc = start();
+        if (NO_ERROR != rc) {
+            return rc;
+        }
+    } else {
+        CDBG("%s: Request on an existing stream",__func__);
+    }
+
+    index = mMemory.getMatchBufIndex((void*)buffer);
+    if(index < 0) {
+        rc = registerBuffer(buffer, mIsType);
+        if (NO_ERROR != rc) {
+            ALOGE("%s: On-the-fly buffer registration failed %d",
+                    __func__, rc);
+            return rc;
+        }
+
+        index = mMemory.getMatchBufIndex((void*)buffer);
+        if (index < 0) {
+            ALOGE("%s: Could not find object among registered buffers",
+                    __func__);
+            return DEAD_OBJECT;
+        }
+    }
+
+    rc = mStreams[0]->bufDone((uint32_t)index);
+    if(rc != NO_ERROR) {
+        ALOGE("%s: Failed to Q new buffer to stream",__func__);
+        return rc;
+    }
+
+    rc = mMemory.markFrameNumber((uint32_t)index, frameNumber);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : getReprocessType
+ *
+ * DESCRIPTION: get the type of reprocess output supported by this channel
+ *
+ * PARAMETERS : NONE
+ *
+ * RETURN     : reprocess_type_t : type of reprocess
+ *==========================================================================*/
+reprocess_type_t QCamera3RegularChannel::getReprocessType()
+{
+    return REPROCESS_TYPE_PRIVATE;
+}
+
+QCamera3MetadataChannel::QCamera3MetadataChannel(uint32_t cam_handle,
+                    uint32_t channel_handle,
+                    mm_camera_ops_t *cam_ops,
+                    channel_cb_routine cb_routine,
+                    cam_padding_info_t *paddingInfo,
+                    uint32_t postprocess_mask,
+                    void *userData, uint32_t numBuffers) :
+                        QCamera3Channel(cam_handle, channel_handle, cam_ops,
+                                cb_routine, paddingInfo, postprocess_mask,
+                                userData, numBuffers),
+                        mMemory(NULL)
+{
+}
+
+QCamera3MetadataChannel::~QCamera3MetadataChannel()
+{
+    if (m_bIsActive)
+        stop();
+
+    if (mMemory) {
+        mMemory->deallocate();
+        delete mMemory;
+        mMemory = NULL;
+    }
+}
+
+int32_t QCamera3MetadataChannel::initialize(cam_is_type_t isType)
+{
+    ATRACE_CALL();
+    int32_t rc;
+    cam_dimension_t streamDim;
+
+    if (mMemory || m_numStreams > 0) {
+        ALOGE("%s: metadata channel already initialized", __func__);
+        return -EINVAL;
+    }
+
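+    // The metadata channel transports metadata_buffer_t structures as a
+    // stream whose "width" is the structure size in bytes and whose
+    // "height" is 1.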
+    streamDim.width = (int32_t)sizeof(metadata_buffer_t);
+    streamDim.height = 1;
+
+    mIsType = isType;
+    rc = QCamera3Channel::addStream(CAM_STREAM_TYPE_METADATA, CAM_FORMAT_MAX,
+            streamDim, ROTATE_0, (uint8_t)mNumBuffers, mPostProcMask, mIsType);
+    if (rc < 0) {
+        ALOGE("%s: addStream failed", __func__);
+    }
+    return rc;
+}
+
+int32_t QCamera3MetadataChannel::request(buffer_handle_t * /*buffer*/,
+                                                uint32_t /*frameNumber*/)
+{
+    if (!m_bIsActive) {
+        return start();
+    }
+    else
+        return 0;
+}
+
+void QCamera3MetadataChannel::streamCbRoutine(
+                        mm_camera_super_buf_t *super_frame,
+                        QCamera3Stream * /*stream*/)
+{
+    ATRACE_CALL();
+    uint32_t requestNumber = 0;
+    if (super_frame == NULL || super_frame->num_bufs != 1) {
+        ALOGE("%s: super_frame is not valid", __func__);
+        return;
+    }
+    if (mChannelCB) {
+        mChannelCB(super_frame, NULL, requestNumber, false, mUserData);
+    }
+}
+
+QCamera3StreamMem* QCamera3MetadataChannel::getStreamBufs(uint32_t len)
+{
+    int rc;
+    if (len < sizeof(metadata_buffer_t)) {
+        ALOGE("%s: Metadata buffer size less than structure %d vs %d",
+                __func__,
+                len,
+                sizeof(metadata_buffer_t));
+        return NULL;
+    }
+    mMemory = new QCamera3StreamMem(MIN_STREAMING_BUFFER_NUM);
+    if (!mMemory) {
+        ALOGE("%s: unable to create metadata memory", __func__);
+        return NULL;
+    }
+    rc = mMemory->allocateAll(len);
+    if (rc < 0) {
+        ALOGE("%s: unable to allocate metadata memory", __func__);
+        delete mMemory;
+        mMemory = NULL;
+        return NULL;
+    }
+    clear_metadata_buffer((metadata_buffer_t*)mMemory->getPtr(0));
+    return mMemory;
+}
+
+void QCamera3MetadataChannel::putStreamBufs()
+{
+    mMemory->deallocate();
+    delete mMemory;
+    mMemory = NULL;
+}
+/*************************************************************************************/
+// RAW Channel related functions
+QCamera3RawChannel::QCamera3RawChannel(uint32_t cam_handle,
+                    uint32_t channel_handle,
+                    mm_camera_ops_t *cam_ops,
+                    channel_cb_routine cb_routine,
+                    cam_padding_info_t *paddingInfo,
+                    void *userData,
+                    camera3_stream_t *stream,
+                    uint32_t postprocess_mask,
+                    QCamera3Channel *metadataChannel,
+                    bool raw_16, uint32_t numBuffers) :
+                        QCamera3RegularChannel(cam_handle, channel_handle, cam_ops,
+                                cb_routine, paddingInfo, userData, stream,
+                                CAM_STREAM_TYPE_RAW, postprocess_mask, metadataChannel, numBuffers),
+                        mIsRaw16(raw_16)
+{
+    char prop[PROPERTY_VALUE_MAX];
+    property_get("persist.camera.raw.debug.dump", prop, "0");
+    mRawDump = atoi(prop);
+}
+
+QCamera3RawChannel::~QCamera3RawChannel()
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : initialize
+ *
+ * DESCRIPTION: Initialize and add camera channel & stream
+ *
+ * PARAMETERS :
+ * @isType    : image stabilization type on the stream
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+
+int32_t QCamera3RawChannel::initialize(cam_is_type_t isType)
+{
+    return QCamera3RegularChannel::initialize(isType);
+}
+
+void QCamera3RawChannel::streamCbRoutine(
+                        mm_camera_super_buf_t *super_frame,
+                        QCamera3Stream * stream)
+{
+    ATRACE_CALL();
+    /* Move this back down once verified */
+    if (mRawDump)
+        dumpRawSnapshot(super_frame->bufs[0]);
+
+    if (mIsRaw16) {
+        if (RAW_FORMAT == CAM_FORMAT_BAYER_MIPI_RAW_10BPP_GBRG)
+            convertMipiToRaw16(super_frame->bufs[0]);
+        else
+            convertLegacyToRaw16(super_frame->bufs[0]);
+    }
+
+    //Make sure cache coherence because extra processing is done
+    mMemory.cleanInvalidateCache(super_frame->bufs[0]->buf_idx);
+
+    QCamera3RegularChannel::streamCbRoutine(super_frame, stream);
+    return;
+}
+
+void QCamera3RawChannel::dumpRawSnapshot(mm_camera_buf_def_t *frame)
+{
+   QCamera3Stream *stream = getStreamByIndex(0);
+   if (stream != NULL) {
+       char buf[FILENAME_MAX];
+       memset(buf, 0, sizeof(buf));
+       cam_dimension_t dim;
+       memset(&dim, 0, sizeof(dim));
+       stream->getFrameDimension(dim);
+
+       cam_frame_len_offset_t offset;
+       memset(&offset, 0, sizeof(cam_frame_len_offset_t));
+       stream->getFrameOffset(offset);
+       snprintf(buf, sizeof(buf), QCAMERA_DUMP_FRM_LOCATION"r_%d_%dx%d.raw",
+                frame->frame_idx, offset.mp[0].stride, offset.mp[0].scanline);
+
+       int file_fd = open(buf, O_RDWR| O_CREAT, 0644);
+       if (file_fd >= 0) {
+          ssize_t written_len = write(file_fd, frame->buffer, frame->frame_len);
+          ALOGE("%s: written number of bytes %zd", __func__, written_len);
+          close(file_fd);
+       } else {
+          ALOGE("%s: failed to open file to dump image", __func__);
+       }
+   } else {
+       ALOGE("%s: Could not find stream", __func__);
+   }
+
+}
+
+void QCamera3RawChannel::convertLegacyToRaw16(mm_camera_buf_def_t *frame)
+{
+    // Convert image buffer from Opaque raw format to RAW16 format
+    // 10bit Opaque raw is stored in the format of:
+    // 0000 - p5 - p4 - p3 - p2 - p1 - p0
+    // where p0 to p5 are 6 pixels (each is 10bit) and the most significant
+    // 4 bits are 0s. Each 64bit word contains 6 pixels.
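+    // For example, pixel x of a row occupies bits [10*(x%6)+9 : 10*(x%6)] of
+    // the 64-bit word row_start[x/6]; the loop below extracts it with
+    // (row_start[x/6] >> (10*(x%6))) & 0x3FF.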
+
+  QCamera3Stream *stream = getStreamByIndex(0);
+  if (stream != NULL) {
+      cam_dimension_t dim;
+      memset(&dim, 0, sizeof(dim));
+      stream->getFrameDimension(dim);
+
+      cam_frame_len_offset_t offset;
+      memset(&offset, 0, sizeof(cam_frame_len_offset_t));
+      stream->getFrameOffset(offset);
+
+      uint32_t raw16_stride = ((uint32_t)dim.width + 15U) & ~15U;
+      uint16_t* raw16_buffer = (uint16_t *)frame->buffer;
+
+      // In-place format conversion.
+      // Raw16 format always occupies more memory than opaque raw10.
+      // Convert to Raw16 by iterating through all pixels from bottom-right
+      // to top-left of the image, so packed source data is read before it
+      // is overwritten. Two special notes:
+      // 1. Cross-platform raw16's stride is aligned to 16 pixels.
+      // 2. Opaque raw10's stride is 6 pixels, and aligned to 16 bytes.
+      for (int32_t ys = dim.height - 1; ys >= 0; ys--) {
+          uint32_t y = (uint32_t)ys;
+          uint64_t* row_start = (uint64_t *)frame->buffer +
+                  y * (uint32_t)offset.mp[0].stride_in_bytes / 8;
+          for (int32_t xs = dim.width - 1; xs >= 0; xs--) {
+              uint32_t x = (uint32_t)xs;
+              uint16_t raw16_pixel = 0x3FF & (row_start[x/6] >> (10*(x%6)));
+              raw16_buffer[y*raw16_stride+x] = raw16_pixel;
+          }
+      }
+  } else {
+      ALOGE("%s: Could not find stream", __func__);
+  }
+
+}
+
+void QCamera3RawChannel::convertMipiToRaw16(mm_camera_buf_def_t *frame)
+{
+    // Convert image buffer from mipi10 raw format to RAW16 format
+    // mipi10 opaque raw is stored in the format of:
+    // P3(1:0) P2(1:0) P1(1:0) P0(1:0) P3(9:2) P2(9:2) P1(9:2) P0(9:2)
+    // 4 pixels occupy 5 bytes, no padding needed
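+    // For example, pixel x of a row belongs to the 4-pixel group starting at
+    // byte 5*(x/4); its 8 MSBs are byte x%4 of that group and its 2 LSBs are
+    // packed into the group's 5th byte, as unpacked in the loop below.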
+
+    QCamera3Stream *stream = getStreamByIndex(0);
+    if (stream != NULL) {
+        cam_dimension_t dim;
+        memset(&dim, 0, sizeof(dim));
+        stream->getFrameDimension(dim);
+
+        cam_frame_len_offset_t offset;
+        memset(&offset, 0, sizeof(cam_frame_len_offset_t));
+        stream->getFrameOffset(offset);
+
+        uint32_t raw16_stride = ((uint32_t)dim.width + 15U) & ~15U;
+        uint16_t* raw16_buffer = (uint16_t *)frame->buffer;
+
+        // Some raw processing may be needed prior to conversion.
+        static bool raw_proc_lib_load_attempted = false;
+        static void *raw_proc_lib = NULL;
+        static void *raw_proc_fn = NULL;
+        if (! raw_proc_lib && ! raw_proc_lib_load_attempted) {
+            raw_proc_lib_load_attempted = true;
+            raw_proc_lib = dlopen("libgoog_rownr.so", RTLD_NOW);
+            if (raw_proc_lib) {
+                *(void **)&raw_proc_fn = dlsym(raw_proc_lib, "rownr_process_bayer10");
+            }
+        }
+        if (raw_proc_fn) {
+            int (*raw_proc)(unsigned char*,int,int,int,int) =
+                      (int (*)(unsigned char*,int,int,int,int))(raw_proc_fn);
+            raw_proc((unsigned char*)(frame->buffer), 0, dim.width, dim.height,
+                       offset.mp[0].stride_in_bytes);
+        }
+
+        // In-place format conversion.
+        // Raw16 format always occupies more memory than opaque raw10.
+        // Convert to Raw16 by iterating through all pixels from bottom-right
+        // to top-left of the image, so packed source data is read before it
+        // is overwritten. Two special notes:
+        // 1. Cross-platform raw16's stride is aligned to 16 pixels.
+        // 2. mipi raw10's stride is 4 pixels, and aligned to 16 bytes.
+        for (int32_t ys = dim.height - 1; ys >= 0; ys--) {
+            uint32_t y = (uint32_t)ys;
+            uint8_t* row_start = (uint8_t *)frame->buffer +
+                    y * (uint32_t)offset.mp[0].stride_in_bytes;
+            for (int32_t xs = dim.width - 1; xs >= 0; xs--) {
+                uint32_t x = (uint32_t)xs;
+                uint8_t upper_8bit = row_start[5*(x/4)+x%4];
+                // each pixel's 2 LSBs occupy 2 bits of the group's 5th byte
+                uint8_t lower_2bit = ((row_start[5*(x/4)+4] >> (2*(x%4))) & 0x3);
+                uint16_t raw16_pixel =
+                        (uint16_t)(((uint16_t)upper_8bit)<<2 |
+                        (uint16_t)lower_2bit);
+                raw16_buffer[y*raw16_stride+x] = raw16_pixel;
+            }
+        }
+    } else {
+        ALOGE("%s: Could not find stream", __func__);
+    }
+
+}
+
+/*===========================================================================
+ * FUNCTION   : getReprocessType
+ *
+ * DESCRIPTION: get the type of reprocess output supported by this channel
+ *
+ * PARAMETERS : NONE
+ *
+ * RETURN     : reprocess_type_t : type of reprocess
+ *==========================================================================*/
+reprocess_type_t QCamera3RawChannel::getReprocessType()
+{
+    return REPROCESS_TYPE_RAW;
+}
+
+
+/*************************************************************************************/
+// RAW Dump Channel related functions
+
+/*===========================================================================
+ * FUNCTION   : QCamera3RawDumpChannel
+ *
+ * DESCRIPTION: Constructor for RawDumpChannel
+ *
+ * PARAMETERS :
+ *   @cam_handle    : Handle for Camera
+ *   @channel_handle: Channel handle
+ *   @cam_ops       : Function pointer table
+ *   @rawDumpSize   : Dimensions for the Raw stream
+ *   @paddingInfo   : Padding information for stream
+ *   @userData      : Cookie for parent
+ *   @postprocess_mask : PP feature mask for this stream
+ *   @numBuffers    : number of max dequeued buffers
+ *
+ * RETURN           : NA
+ *==========================================================================*/
+QCamera3RawDumpChannel::QCamera3RawDumpChannel(uint32_t cam_handle,
+                    uint32_t channel_handle,
+                    mm_camera_ops_t *cam_ops,
+                    cam_dimension_t rawDumpSize,
+                    cam_padding_info_t *paddingInfo,
+                    void *userData,
+                    uint32_t postprocess_mask, uint32_t numBuffers) :
+                        QCamera3Channel(cam_handle, channel_handle, cam_ops, NULL,
+                                paddingInfo, postprocess_mask,
+                                userData, numBuffers),
+                        mDim(rawDumpSize),
+                        mMemory(NULL)
+{
+    char prop[PROPERTY_VALUE_MAX];
+    property_get("persist.camera.raw.dump", prop, "0");
+    mRawDump = atoi(prop);
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCamera3RawDumpChannel
+ *
+ * DESCRIPTION: Destructor for RawDumpChannel
+ *
+ * PARAMETERS :
+ *
+ * RETURN           : NA
+ *==========================================================================*/
+
+QCamera3RawDumpChannel::~QCamera3RawDumpChannel()
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : dumpRawSnapshot
+ *
+ * DESCRIPTION: Helper function to dump Raw frames
+ *
+ * PARAMETERS :
+ *  @frame      : stream buf frame to be dumped
+ *
+ *  RETURN      : NA
+ *==========================================================================*/
+void QCamera3RawDumpChannel::dumpRawSnapshot(mm_camera_buf_def_t *frame)
+{
+    QCamera3Stream *stream = getStreamByIndex(0);
+    if (stream != NULL) {
+        char buf[FILENAME_MAX];
+        struct timeval tv;
+        struct tm timeinfo_data;
+        struct tm *timeinfo;
+
+        cam_dimension_t dim;
+        memset(&dim, 0, sizeof(dim));
+        stream->getFrameDimension(dim);
+
+        cam_frame_len_offset_t offset;
+        memset(&offset, 0, sizeof(cam_frame_len_offset_t));
+        stream->getFrameOffset(offset);
+
+        gettimeofday(&tv, NULL);
+        timeinfo = localtime_r(&tv.tv_sec, &timeinfo_data);
+
+        if (NULL != timeinfo) {
+            memset(buf, 0, sizeof(buf));
+            snprintf(buf, sizeof(buf),
+                    QCAMERA_DUMP_FRM_LOCATION
+                    "%04d-%02d-%02d-%02d-%02d-%02d-%06ld_%d_%dx%d.raw",
+                    timeinfo->tm_year + 1900, timeinfo->tm_mon + 1,
+                    timeinfo->tm_mday, timeinfo->tm_hour,
+                    timeinfo->tm_min, timeinfo->tm_sec,tv.tv_usec,
+                    frame->frame_idx, dim.width, dim.height);
+
+            int file_fd = open(buf, O_RDWR| O_CREAT, 0777);
+            if (file_fd >= 0) {
+                ssize_t written_len =
+                        write(file_fd, frame->buffer, offset.frame_len);
+                CDBG("%s: written number of bytes %zd", __func__, written_len);
+                close(file_fd);
+            } else {
+                ALOGE("%s: failed to open file to dump image", __func__);
+            }
+        } else {
+            ALOGE("%s: localtime_r() error", __func__);
+        }
+    } else {
+        ALOGE("%s: Could not find stream", __func__);
+    }
+
+}
+
+/*===========================================================================
+ * FUNCTION   : streamCbRoutine
+ *
+ * DESCRIPTION: Callback routine invoked for each frame generated for
+ *              Rawdump channel
+ *
+ * PARAMETERS :
+ *   @super_frame  : stream buf frame generated
+ *   @stream       : Underlying Stream object cookie
+ *
+ * RETURN          : NA
+ *==========================================================================*/
+void QCamera3RawDumpChannel::streamCbRoutine(mm_camera_super_buf_t *super_frame,
+                                                QCamera3Stream *stream)
+{
+    CDBG("%s: E",__func__);
+    if (super_frame == NULL || super_frame->num_bufs != 1) {
+        ALOGE("%s: super_frame is not valid", __func__);
+        return;
+    }
+
+    if (mRawDump)
+        dumpRawSnapshot(super_frame->bufs[0]);
+
+    bufDone(super_frame);
+    free(super_frame);
+}
+
+/*===========================================================================
+ * FUNCTION   : getStreamBufs
+ *
+ * DESCRIPTION: Callback function provided to interface to get buffers.
+ *
+ * PARAMETERS :
+ *   @len       : Length of each buffer to be allocated
+ *
+ * RETURN     : NULL on buffer allocation failure
+ *              QCamera3StreamMem object on success
+ *==========================================================================*/
+QCamera3StreamMem* QCamera3RawDumpChannel::getStreamBufs(uint32_t len)
+{
+    int rc;
+    mMemory = new QCamera3StreamMem(mNumBuffers);
+
+    if (!mMemory) {
+        ALOGE("%s: unable to create heap memory", __func__);
+        return NULL;
+    }
+    rc = mMemory->allocateAll((size_t)len);
+    if (rc < 0) {
+        ALOGE("%s: unable to allocate heap memory", __func__);
+        delete mMemory;
+        mMemory = NULL;
+        return NULL;
+    }
+    return mMemory;
+}
+
+/*===========================================================================
+ * FUNCTION   : putStreamBufs
+ *
+ * DESCRIPTION: Callback function provided to interface to return buffers.
+ *              Although no handles are actually returned, the implicit
+ *              assumption is that the interface will no longer use the
+ *              buffers and the channel can deallocate them if necessary.
+ *
+ * PARAMETERS : NA
+ *
+ * RETURN     : NA
+ *==========================================================================*/
+void QCamera3RawDumpChannel::putStreamBufs()
+{
+    mMemory->deallocate();
+    delete mMemory;
+    mMemory = NULL;
+}
+
+/*===========================================================================
+ * FUNCTION : request
+ *
+ * DESCRIPTION: Request function used as trigger
+ *
+ * PARAMETERS :
+ * @buffer      : NULL, since this is an internal channel
+ * @frameNumber : unused, since this is an internal stream
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3RawDumpChannel::request(buffer_handle_t * /*buffer*/,
+                                                uint32_t /*frameNumber*/)
+{
+    if (!m_bIsActive) {
+        return QCamera3Channel::start();
+    }
+    else
+        return 0;
+}
+
+/*===========================================================================
+ * FUNCTION : initialize
+ *
+ * DESCRIPTION: Initializes channel params and creates underlying stream
+ *
+ * PARAMETERS :
+ *    @isType : type of image stabilization required on this stream
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3RawDumpChannel::initialize(cam_is_type_t isType)
+{
+    int32_t rc;
+
+    mIsType = isType;
+    rc = QCamera3Channel::addStream(CAM_STREAM_TYPE_RAW,
+        CAM_FORMAT_BAYER_MIPI_RAW_10BPP_GBRG, mDim, ROTATE_0, (uint8_t)mNumBuffers,
+        mPostProcMask, mIsType);
+    if (rc < 0) {
+        ALOGE("%s: addStream failed", __func__);
+    }
+    return rc;
+}
+/*************************************************************************************/
+
+/* QCamera3YUVChannel methods */
+
+/*===========================================================================
+ * FUNCTION   : QCamera3YUVChannel
+ *
+ * DESCRIPTION: constructor of QCamera3YUVChannel
+ *
+ * PARAMETERS :
+ *   @cam_handle : camera handle
+ *   @channel_handle : channel handle
+ *   @cam_ops    : ptr to camera ops table
+ *   @cb_routine : callback routine to frame aggregator
+ *   @paddingInfo : padding information for the stream
+ *   @userData   : cookie for the parent
+ *   @stream     : camera3_stream_t structure
+ *   @stream_type: Channel stream type
+ *   @postprocess_mask: the postprocess mask for streams of this channel
+ *   @metadataChannel: handle to the metadataChannel
+ * RETURN     : none
+ *==========================================================================*/
+QCamera3YUVChannel::QCamera3YUVChannel(uint32_t cam_handle,
+        uint32_t channel_handle,
+        mm_camera_ops_t *cam_ops,
+        channel_cb_routine cb_routine,
+        cam_padding_info_t *paddingInfo,
+        void *userData,
+        camera3_stream_t *stream,
+        cam_stream_type_t stream_type,
+        uint32_t postprocess_mask,
+        QCamera3Channel *metadataChannel) :
+            QCamera3ProcessingChannel(cam_handle, channel_handle, cam_ops,
+                    cb_routine, paddingInfo, userData, stream, stream_type,
+                    postprocess_mask, metadataChannel)
+{
+
+    mBypass = (postprocess_mask == CAM_QCOM_FEATURE_NONE);
+    mFrameLen = 0;
+    mEdgeMode.edge_mode = CAM_EDGE_MODE_OFF;
+    mEdgeMode.sharpness = 0;
+    mNoiseRedMode = CAM_NOISE_REDUCTION_MODE_OFF;
+    memset(&mCropRegion, 0, sizeof(mCropRegion));
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCamera3YUVChannel
+ *
+ * DESCRIPTION: destructor of QCamera3YUVChannel
+ *
+ * PARAMETERS : none
+ *
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCamera3YUVChannel::~QCamera3YUVChannel()
+{
+   // Heap buffers allocated in mMemory are freed automatically
+   // by its destructor
+}
+
+/*===========================================================================
+ * FUNCTION   : initialize
+ *
+ * DESCRIPTION: Initialize and add camera channel & stream
+ *
+ * PARAMETERS :
+ * @isType    : the image stabilization type
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3YUVChannel::initialize(cam_is_type_t isType)
+{
+    ATRACE_CALL();
+    int32_t rc = NO_ERROR;
+    cam_dimension_t streamDim;
+
+    if (NULL == mCamera3Stream) {
+        ALOGE("%s: Camera stream uninitialized", __func__);
+        return NO_INIT;
+    }
+
+    if (1 <= m_numStreams) {
+        // Only one stream per channel supported in v3 Hal
+        return NO_ERROR;
+    }
+
+    mIsType  = isType;
+    mStreamFormat = CALLBACK_FORMAT;
+    streamDim.width = mCamera3Stream->width;
+    streamDim.height = mCamera3Stream->height;
+
+    rc = QCamera3Channel::addStream(mStreamType,
+            mStreamFormat,
+            streamDim,
+            ROTATE_0,
+            mNumBufs,
+            mPostProcMask,
+            mIsType);
+    if (rc < 0) {
+        ALOGE("%s: addStream failed", __func__);
+        return rc;
+    }
+
+    cam_stream_buf_plane_info_t buf_planes;
+    cam_padding_info_t paddingInfo = *mPaddingInfo;
+
+    memset(&buf_planes, 0, sizeof(buf_planes));
+    //to ensure a big enough buffer size set the height and width
+    //padding to max(height padding, width padding)
+    paddingInfo.width_padding = MAX(paddingInfo.width_padding, paddingInfo.height_padding);
+    paddingInfo.height_padding = paddingInfo.width_padding;
+
+    rc = mm_stream_calc_offset_snapshot(mStreamFormat, &streamDim, &paddingInfo,
+            &buf_planes);
+    if (rc < 0) {
+        ALOGE("%s: mm_stream_calc_offset_preview failed", __func__);
+        return rc;
+    }
+
+    mFrameLen = buf_planes.plane_info.frame_len;
+
+    if (NO_ERROR != rc) {
+        ALOGE("%s: Initialize failed, rc = %d", __func__, rc);
+        return rc;
+    }
+
+    /* initialize offline meta memory for input reprocess */
+    rc = QCamera3ProcessingChannel::initialize(isType);
+    if (NO_ERROR != rc) {
+        ALOGE("%s: Processing Channel initialize failed, rc = %d",
+                __func__, rc);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : request
+ *
+ * DESCRIPTION: entry function for a request on a YUV stream. This function
+ *              has the logic to service a request based on its type
+ *
+ * PARAMETERS :
+ * @buffer          : pointer to the output buffer
+ * @frameNumber     : frame number of the request
+ * @pInputBuffer    : pointer to input buffer if an input request
+ * @metadata        : parameters associated with the request
+ *
+ * RETURN     : 0 on a success start of capture
+ *              -EINVAL on invalid input
+ *              -ENODEV on serious error
+ *==========================================================================*/
+int32_t QCamera3YUVChannel::request(buffer_handle_t *buffer,
+        uint32_t frameNumber,
+        camera3_stream_buffer_t* pInputBuffer,
+        metadata_buffer_t* metadata, bool &needMetadata)
+{
+    int32_t rc = NO_ERROR;
+    int index;
+    Mutex::Autolock lock(mOfflinePpLock);
+
+    CDBG("%s: pInputBuffer is %p", __func__, pInputBuffer);
+    CDBG("%s, frame number %d", __func__, frameNumber);
+    if (NULL == buffer || NULL == metadata) {
+        ALOGE("%s: Invalid buffer/metadata in channel request", __func__);
+        return BAD_VALUE;
+    }
+
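+    // In bypass mode (no fixed postprocess mask) each request decides at
+    // runtime whether the frame needs offline CPP postprocessing. The
+    // per-frame decision is kept in mOfflinePpInfoList so that buffers can
+    // still be returned to the framework in request order.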
+    PpInfo ppInfo;
+    memset(&ppInfo, 0, sizeof(ppInfo));
+    ppInfo.frameNumber = frameNumber;
+    ppInfo.offlinePpFlag = false;
+    if (mBypass && !pInputBuffer ) {
+        ppInfo.offlinePpFlag = needsFramePostprocessing(metadata);
+        ppInfo.output = buffer;
+        mOfflinePpInfoList.push_back(ppInfo);
+    }
+
+    CDBG("%s: offlinePpFlag is %d", __func__, ppInfo.offlinePpFlag);
+    needMetadata = ppInfo.offlinePpFlag;
+    if (!ppInfo.offlinePpFlag) {
+        // regular request
+        return QCamera3ProcessingChannel::request(buffer, frameNumber,
+                pInputBuffer, metadata);
+    } else {
+        if(!m_bIsActive) {
+            rc = start();
+            if (NO_ERROR != rc)
+                return rc;
+        } else {
+            CDBG("%s: Request on an existing stream",__func__);
+        }
+
+        //we need to send this frame through the CPP
+        //Allocate heap memory, then buf done on the buffer
+        uint32_t bufIdx;
+        if (mFreeHeapBufferList.empty()) {
+            rc = mMemory.allocateOne(mFrameLen);
+            if (rc < 0) {
+                ALOGE("%s: Failed allocating heap buffer. Fatal", __func__);
+                return BAD_VALUE;
+            } else {
+                bufIdx = (uint32_t)rc;
+            }
+        } else {
+            bufIdx = *(mFreeHeapBufferList.begin());
+            mFreeHeapBufferList.erase(mFreeHeapBufferList.begin());
+        }
+
+        /* Configure and start postproc if necessary */
+        reprocess_config_t reproc_cfg;
+        cam_dimension_t dim;
+        memset(&reproc_cfg, 0, sizeof(reprocess_config_t));
+        memset(&dim, 0, sizeof(dim));
+        mStreams[0]->getFrameDimension(dim);
+        setReprocConfig(reproc_cfg, NULL, metadata, mStreamFormat, dim);
+
+        // Start postprocessor without input buffer
+        startPostProc(reproc_cfg);
+
+        CDBG("%s: erasing %d", __func__, bufIdx);
+
+        mMemory.markFrameNumber(bufIdx, frameNumber);
+        mStreams[0]->bufDone(bufIdx);
+
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : streamCbRoutine
+ *
+ * DESCRIPTION: callback routine for a filled YUV stream buffer. In bypass
+ *              mode the frame is either sent for offline postprocessing or
+ *              returned/cached to preserve request order; otherwise it is
+ *              forwarded to the processing channel callback.
+ *
+ * PARAMETERS :
+ * @super_frame : the super frame with filled buffer
+ * @stream      : stream on which the buffer was requested and filled
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera3YUVChannel::streamCbRoutine(mm_camera_super_buf_t *super_frame,
+        QCamera3Stream *stream)
+{
+    ATRACE_CALL();
+    uint8_t frameIndex;
+    int32_t resultFrameNumber;
+
+    if (checkStreamCbErrors(super_frame, stream) != NO_ERROR) {
+        ALOGE("%s: Error with the stream callback", __func__);
+        return;
+    }
+
+    frameIndex = (uint8_t)super_frame->bufs[0]->buf_idx;
+    if(frameIndex >= mNumBufs) {
+         ALOGE("%s: Error, Invalid index for buffer",__func__);
+         stream->bufDone(frameIndex);
+         return;
+    }
+
+    if (mBypass) {
+        List<PpInfo>::iterator ppInfo;
+
+        Mutex::Autolock lock(mOfflinePpLock);
+        resultFrameNumber = mMemory.getFrameNumber(frameIndex);
+        for (ppInfo = mOfflinePpInfoList.begin();
+                ppInfo != mOfflinePpInfoList.end(); ppInfo++) {
+            if (ppInfo->frameNumber == (uint32_t)resultFrameNumber) {
+                break;
+            }
+        }
+        CDBG("%s, frame index %d, frame number %d", __func__, frameIndex, resultFrameNumber);
+        //check the reprocessing required flag against the frame number
+        if (ppInfo == mOfflinePpInfoList.end()) {
+            ALOGE("%s: Error, request for frame number is a reprocess.", __func__);
+            stream->bufDone(frameIndex);
+            return;
+        }
+
+        if (ppInfo->offlinePpFlag) {
+            mm_camera_super_buf_t *frame =
+                    (mm_camera_super_buf_t *)malloc(sizeof(mm_camera_super_buf_t));
+            if (frame == NULL) {
+                ALOGE("%s: Error allocating memory to save received_frame structure.",
+                        __func__);
+                if(stream) {
+                    stream->bufDone(frameIndex);
+                }
+                return;
+            }
+
+            *frame = *super_frame;
+            m_postprocessor.processData(frame, ppInfo->output, resultFrameNumber);
+            free(super_frame);
+            return;
+        } else {
+            if (ppInfo != mOfflinePpInfoList.begin()) {
+                // There is pending reprocess buffer, cache current buffer
+                if (ppInfo->callback_buffer != NULL) {
+                    ALOGE("%s: Fatal: cached callback_buffer is already present",
+                        __func__);
+
+                }
+                ppInfo->callback_buffer = super_frame;
+                return;
+            } else {
+                mOfflinePpInfoList.erase(ppInfo);
+            }
+        }
+    }
+
+    QCamera3ProcessingChannel::streamCbRoutine(super_frame, stream);
+    return;
+}
+
+/*===========================================================================
+ * FUNCTION   : reprocessCbRoutine
+ *
+ * DESCRIPTION: callback function for the reprocessed frame. This frame now
+ *              should be returned to the framework. This same callback is
+ *              used during input reprocessing or offline postprocessing
+ *
+ * PARAMETERS :
+ * @resultBuffer      : buffer containing the reprocessed data
+ * @resultFrameNumber : frame number on which the buffer was requested
+ *
+ * RETURN     : NONE
+ *
+ *==========================================================================*/
+void QCamera3YUVChannel::reprocessCbRoutine(buffer_handle_t *resultBuffer,
+        uint32_t resultFrameNumber)
+{
+    CDBG("%s E: frame number %d", __func__, resultFrameNumber);
+    Vector<mm_camera_super_buf_t *> pendingCbs;
+
+    /* release the input buffer and input metadata buffer if used */
+    if (0 > mMemory.getHeapBufferIndex(resultFrameNumber)) {
+        /* mOfflineMemory and mOfflineMetaMemory used only for input reprocessing */
+        int32_t rc = releaseOfflineMemory(resultFrameNumber);
+        if (NO_ERROR != rc) {
+            ALOGE("%s: Error releasing offline memory rc = %d", __func__, rc);
+        }
+        /* Since reprocessing is done, send the callback to release the input buffer */
+        if (mChannelCB) {
+            mChannelCB(NULL, NULL, resultFrameNumber, true, mUserData);
+        }
+    }
+
+    if (mBypass) {
+        int32_t rc = handleOfflinePpCallback(resultFrameNumber, pendingCbs);
+        if (rc != NO_ERROR) {
+            return;
+        }
+    }
+
+    issueChannelCb(resultBuffer, resultFrameNumber);
+
+    // Call all pending callbacks to return buffers
+    for (size_t i = 0; i < pendingCbs.size(); i++) {
+        QCamera3ProcessingChannel::streamCbRoutine(
+                pendingCbs[i], mStreams[0]);
+    }
+
+}
+
+/*===========================================================================
+ * FUNCTION   : needsFramePostprocessing
+ *
+ * DESCRIPTION: checks the request metadata (edge mode, noise reduction mode
+ *              and scaler crop region) to decide whether the frame needs
+ *              offline postprocessing
+ *
+ * PARAMETERS :
+ * @meta : metadata buffer of the current request
+ *
+ * RETURN     :
+ *  TRUE if frame needs to be postprocessed
+ *  FALSE if frame does not need to be postprocessed
+ *
+ *==========================================================================*/
+bool QCamera3YUVChannel::needsFramePostprocessing(metadata_buffer_t *meta)
+{
+    bool ppNeeded = false;
+
+    //sharpness
+    IF_META_AVAILABLE(cam_edge_application_t, edgeMode,
+            CAM_INTF_META_EDGE_MODE, meta) {
+        mEdgeMode = *edgeMode;
+    }
+
+    //wnr
+    IF_META_AVAILABLE(uint32_t, noiseRedMode,
+            CAM_INTF_META_NOISE_REDUCTION_MODE, meta) {
+        mNoiseRedMode = *noiseRedMode;
+    }
+
+    //crop region
+    IF_META_AVAILABLE(cam_crop_region_t, scalerCropRegion,
+            CAM_INTF_META_SCALER_CROP_REGION, meta) {
+        mCropRegion = *scalerCropRegion;
+    }
+
+    if ((CAM_EDGE_MODE_OFF != mEdgeMode.edge_mode) &&
+            (CAM_EDGE_MODE_ZERO_SHUTTER_LAG != mEdgeMode.edge_mode)) {
+        ppNeeded = true;
+    }
+    if ((CAM_NOISE_REDUCTION_MODE_ZERO_SHUTTER_LAG != mNoiseRedMode) &&
+            (CAM_NOISE_REDUCTION_MODE_OFF != mNoiseRedMode) &&
+            (CAM_NOISE_REDUCTION_MODE_MINIMAL != mNoiseRedMode)) {
+        ppNeeded = true;
+    }
+    if ((mCropRegion.width < (int32_t)mCamera3Stream->width) ||
+            (mCropRegion.height < (int32_t)mCamera3Stream->height)) {
+        ppNeeded = true;
+    }
+
+    return ppNeeded;
+}
+
+/*===========================================================================
+ * FUNCTION   : handleOfflinePpCallback
+ *
+ * DESCRIPTION: callback function for the reprocessed frame from offline
+ *              postprocessing.
+ *
+ * PARAMETERS :
+ * @resultFrameNumber : frame number on which the buffer was requested
+ * @pendingCbs        : pending buffers to be returned first
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3YUVChannel::handleOfflinePpCallback(uint32_t resultFrameNumber,
+            Vector<mm_camera_super_buf_t *>& pendingCbs)
+{
+    Mutex::Autolock lock(mOfflinePpLock);
+    List<PpInfo>::iterator ppInfo;
+
+    for (ppInfo = mOfflinePpInfoList.begin();
+            ppInfo != mOfflinePpInfoList.end(); ppInfo++) {
+        if (ppInfo->frameNumber == resultFrameNumber) {
+            break;
+        }
+    }
+
+    if (ppInfo == mOfflinePpInfoList.end()) {
+        ALOGI("%s: Request of frame number %d is reprocessing",
+                __func__, resultFrameNumber);
+        return NO_ERROR;
+    } else if (ppInfo != mOfflinePpInfoList.begin()) {
+        ALOGE("%s: callback for frame number %d should be head of list",
+                __func__, resultFrameNumber);
+        return BAD_VALUE;
+    }
+
+    if (ppInfo->offlinePpFlag) {
+        // Need to get the input buffer frame index from the
+        // mMemory object and add that to the free heap buffers list.
+        int32_t bufferIndex =
+                mMemory.getHeapBufferIndex(resultFrameNumber);
+        if (bufferIndex < 0) {
+            ALOGE("%s: Fatal %d: no buffer index for frame number %d",
+                    __func__, bufferIndex, resultFrameNumber);
+            return BAD_VALUE;
+        }
+        mFreeHeapBufferList.push_back(bufferIndex);
+        ppInfo = mOfflinePpInfoList.erase(ppInfo);
+
+        // Return pending buffer callbacks
+        while (ppInfo != mOfflinePpInfoList.end() &&
+                !ppInfo->offlinePpFlag && ppInfo->callback_buffer) {
+
+            // Call stream callbacks for cached buffers
+            pendingCbs.push_back(ppInfo->callback_buffer);
+
+            ppInfo = mOfflinePpInfoList.erase(ppInfo);
+        }
+
+    } else {
+        ALOGE("%s: Fatal: request of frame number %d doesn't need"
+                " offline postprocessing. However there is"
+                " reprocessing callback.", __func__,
+                resultFrameNumber);
+        return BAD_VALUE;
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : getReprocessType
+ *
+ * DESCRIPTION: get the type of reprocess output supported by this channel
+ *
+ * PARAMETERS : NONE
+ *
+ * RETURN     : reprocess_type_t : type of reprocess
+ *==========================================================================*/
+reprocess_type_t QCamera3YUVChannel::getReprocessType()
+{
+    return REPROCESS_TYPE_YUV;
+}
+
+/* QCamera3PicChannel methods */
+
+/*===========================================================================
+ * FUNCTION   : jpegEvtHandle
+ *
+ * DESCRIPTION: Function registered to mm-jpeg-interface to handle jpeg events.
+ *              Constructs the result payload and calls mChannelCB to deliver
+ *              the buffer to the framework.
+ *
+ * PARAMETERS :
+ *   @status    : status of jpeg job
+ *   @client_hdl: jpeg client handle
+ *   @jobId     : jpeg job Id
+ *   @p_output  : ptr to jpeg output result struct
+ *   @userdata  : user data ptr
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera3PicChannel::jpegEvtHandle(jpeg_job_status_t status,
+                                              uint32_t /*client_hdl*/,
+                                              uint32_t jobId,
+                                              mm_jpeg_output_t *p_output,
+                                              void *userdata)
+{
+    ATRACE_CALL();
+    buffer_handle_t *resultBuffer = NULL;
+    buffer_handle_t *jpegBufferHandle = NULL;
+    int resultStatus = CAMERA3_BUFFER_STATUS_OK;
+    camera3_stream_buffer_t result;
+    camera3_jpeg_blob_t jpegHeader;
+
+    QCamera3PicChannel *obj = (QCamera3PicChannel *)userdata;
+    if (obj) {
+        //Construct payload for process_capture_result. Call mChannelCb
+
+        qcamera_hal3_jpeg_data_t *job = obj->m_postprocessor.findJpegJobByJobId(jobId);
+
+        if ((job == NULL) || (status == JPEG_JOB_STATUS_ERROR)) {
+            ALOGE("%s: Error in jobId: (%d) with status: %d", __func__, jobId, status);
+            resultStatus = CAMERA3_BUFFER_STATUS_ERROR;
+        }
+
+        if (NULL != job) {
+            uint32_t bufIdx = (uint32_t)job->jpeg_settings->out_buf_index;
+            CDBG("%s: jpeg out_buf_index: %d", __func__, bufIdx);
+
+            //Construct jpeg transient header of type camera3_jpeg_blob_t
+            //Append at the end of jpeg image of buf_filled_len size
+
+            jpegHeader.jpeg_blob_id = CAMERA3_JPEG_BLOB_ID;
+            if (JPEG_JOB_STATUS_DONE == status) {
+                jpegHeader.jpeg_size = (uint32_t)p_output->buf_filled_len;
+                char* jpeg_buf = (char *)p_output->buf_vaddr;
+
+                ssize_t maxJpegSize = -1;
+
+                // Gralloc buffer may have additional padding for 4K page size
+                // Follow size guidelines based on spec since framework relies
+                // on that to reach end of buffer and with it the header
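+                // The camera3_jpeg_blob_t trailer is written at the very end
+                // of the gralloc buffer (maxJpegSize - sizeof(jpegHeader));
+                // the framework locates it there to learn the actual encoded
+                // size (buf_filled_len).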
+
+                //Handle same as resultBuffer, but for readability
+                jpegBufferHandle =
+                        (buffer_handle_t *)obj->mMemory.getBufferHandle(bufIdx);
+
+                if (NULL != jpegBufferHandle) {
+                    maxJpegSize = ((private_handle_t*)(*jpegBufferHandle))->width;
+                    if (maxJpegSize > obj->mMemory.getSize(bufIdx)) {
+                        maxJpegSize = obj->mMemory.getSize(bufIdx);
+                    }
+
+                    size_t jpeg_eof_offset =
+                            (size_t)(maxJpegSize - (ssize_t)sizeof(jpegHeader));
+                    char *jpeg_eof = &jpeg_buf[jpeg_eof_offset];
+                    memcpy(jpeg_eof, &jpegHeader, sizeof(jpegHeader));
+                    obj->mMemory.cleanInvalidateCache(bufIdx);
+                } else {
+                    ALOGE("%s: JPEG buffer not found and index: %d",
+                            __func__,
+                            bufIdx);
+                    resultStatus = CAMERA3_BUFFER_STATUS_ERROR;
+                }
+            }
+
+            //Use below data to issue framework callback
+            resultBuffer =
+                    (buffer_handle_t *)obj->mMemory.getBufferHandle(bufIdx);
+            int32_t resultFrameNumber = obj->mMemory.getFrameNumber(bufIdx);
+            int32_t rc = obj->mMemory.unregisterBuffer(bufIdx);
+            if (NO_ERROR != rc) {
+                ALOGE("%s: Error %d unregistering stream buffer %d",
+                    __func__, rc, bufIdx);
+            }
+
+            result.stream = obj->mCamera3Stream;
+            result.buffer = resultBuffer;
+            result.status = resultStatus;
+            result.acquire_fence = -1;
+            result.release_fence = -1;
+
+            // Release any snapshot buffers before calling
+            // the user callback. The callback can potentially
+            // unblock pending requests to snapshot stream.
+            int32_t snapshotIdx = -1;
+            mm_camera_super_buf_t* src_frame = NULL;
+
+            if (job->src_reproc_frame)
+                src_frame = job->src_reproc_frame;
+            else
+                src_frame = job->src_frame;
+
+            if (src_frame) {
+                if (obj->mStreams[0]->getMyHandle() ==
+                        src_frame->bufs[0]->stream_id) {
+                    snapshotIdx = (int32_t)src_frame->bufs[0]->buf_idx;
+                } else {
+                    ALOGE("%s: Snapshot stream id %d and source frame %d don't match!",
+                            __func__, obj->mStreams[0]->getMyHandle(),
+                            src_frame->bufs[0]->stream_id);
+                }
+            }
+            if (0 <= snapshotIdx) {
+                Mutex::Autolock lock(obj->mFreeBuffersLock);
+                obj->mFreeBufferList.push_back((uint32_t)snapshotIdx);
+            } else {
+                ALOGE("%s: Snapshot buffer not found!", __func__);
+            }
+
+            CDBG("%s: Issue Callback", __func__);
+            if (obj->mChannelCB) {
+                obj->mChannelCB(NULL,
+                        &result,
+                        (uint32_t)resultFrameNumber,
+                        false,
+                        obj->mUserData);
+            }
+
+            // release internal data for jpeg job
+            if ((NULL != job->fwk_frame) || (NULL != job->fwk_src_buffer)) {
+                /* unregister offline input buffer */
+                int32_t inputBufIndex =
+                        obj->mOfflineMemory.getGrallocBufferIndex((uint32_t)resultFrameNumber);
+                if (0 <= inputBufIndex) {
+                    rc = obj->mOfflineMemory.unregisterBuffer(inputBufIndex);
+                } else {
+                    ALOGE("%s: could not find the input buf index, frame number %d",
+                            __func__, resultFrameNumber);
+                }
+                if (NO_ERROR != rc) {
+                    ALOGE("%s: Error %d unregistering input buffer %d",
+                            __func__, rc, bufIdx);
+                }
+
+                /* unregister offline meta buffer */
+                int32_t metaBufIndex =
+                        obj->mOfflineMetaMemory.getHeapBufferIndex((uint32_t)resultFrameNumber);
+                if (0 <= metaBufIndex) {
+                    Mutex::Autolock lock(obj->mFreeOfflineMetaBuffersLock);
+                    obj->mFreeOfflineMetaBuffersList.push_back((uint32_t)metaBufIndex);
+                } else {
+                    ALOGE("%s: could not find the input meta buf index, frame number %d",
+                            __func__, resultFrameNumber);
+                }
+            }
+            obj->m_postprocessor.releaseOfflineBuffers();
+            obj->m_postprocessor.releaseJpegJobData(job);
+            free(job);
+        }
+
+        return;
+    } else {
+        ALOGE("%s: Null userdata in jpeg callback", __func__);
+    }
+}
+
+QCamera3PicChannel::QCamera3PicChannel(uint32_t cam_handle,
+                    uint32_t channel_handle,
+                    mm_camera_ops_t *cam_ops,
+                    channel_cb_routine cb_routine,
+                    cam_padding_info_t *paddingInfo,
+                    void *userData,
+                    camera3_stream_t *stream,
+                    uint32_t postprocess_mask,
+                    bool is4KVideo,
+                    bool isInputStreamConfigured,
+                    QCamera3Channel *metadataChannel,
+                    uint32_t numBuffers) :
+                        QCamera3ProcessingChannel(cam_handle, channel_handle,
+                                cam_ops, cb_routine, paddingInfo, userData,
+                                stream, CAM_STREAM_TYPE_SNAPSHOT,
+                                postprocess_mask, metadataChannel, numBuffers),
+                        mNumSnapshotBufs(0),
+                        mInputBufferHint(isInputStreamConfigured),
+                        mYuvMemory(NULL),
+                        mFrameLen(0)
+{
+    QCamera3HardwareInterface* hal_obj = (QCamera3HardwareInterface*)mUserData;
+    m_max_pic_dim = hal_obj->calcMaxJpegDim();
+    mYuvWidth = stream->width;
+    mYuvHeight = stream->height;
+    mStreamType = CAM_STREAM_TYPE_SNAPSHOT;
+    // Use same pixelformat for 4K video case
+    mStreamFormat = is4KVideo ? VIDEO_FORMAT : SNAPSHOT_FORMAT;
+    int32_t rc = m_postprocessor.initJpeg(jpegEvtHandle, &m_max_pic_dim, this);
+    if (rc != 0) {
+        ALOGE("Init Postprocessor failed");
+    }
+}
+
+QCamera3PicChannel::~QCamera3PicChannel()
+{
+}
+
+int32_t QCamera3PicChannel::initialize(cam_is_type_t isType)
+{
+    int32_t rc = NO_ERROR;
+    cam_dimension_t streamDim;
+    cam_stream_type_t streamType;
+    cam_format_t streamFormat;
+    mm_camera_channel_attr_t attr;
+
+    if (NULL == mCamera3Stream) {
+        ALOGE("%s: Camera stream uninitialized", __func__);
+        return NO_INIT;
+    }
+
+    if (1 <= m_numStreams) {
+        // Only one stream per channel supported in v3 Hal
+        return NO_ERROR;
+    }
+
+    mIsType = isType;
+    streamType = mStreamType;
+    streamFormat = mStreamFormat;
+    streamDim.width = (int32_t)mYuvWidth;
+    streamDim.height = (int32_t)mYuvHeight;
+
+    mNumSnapshotBufs = mCamera3Stream->max_buffers;
+    rc = QCamera3Channel::addStream(streamType, streamFormat, streamDim,
+            ROTATE_0, (uint8_t)mCamera3Stream->max_buffers, mPostProcMask,
+            mIsType);
+
+    if (NO_ERROR != rc) {
+        ALOGE("%s: Initialize failed, rc = %d", __func__, rc);
+        return rc;
+    }
+
+    /* initialize offline meta memory for input reprocess */
+    rc = QCamera3ProcessingChannel::initialize(isType);
+    if (NO_ERROR != rc) {
+        ALOGE("%s: Processing Channel initialize failed, rc = %d",
+                __func__, rc);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : request
+ *
+ * DESCRIPTION: handle the request - either with an input buffer or a direct
+ *              output request
+ *
+ * PARAMETERS :
+ * @buffer       : pointer to the output buffer
+ * @frameNumber  : frame number of the request
+ * @pInputBuffer : pointer to input buffer if an input request
+ * @metadata     : parameters associated with the request
+ *
+ * RETURN     : 0 on a success start of capture
+ *              -EINVAL on invalid input
+ *              -ENODEV on serious error
+ *==========================================================================*/
+int32_t QCamera3PicChannel::request(buffer_handle_t *buffer,
+        uint32_t frameNumber,
+        camera3_stream_buffer_t *pInputBuffer,
+        metadata_buffer_t *metadata)
+{
+    ATRACE_CALL();
+    //FIX ME: Return buffer back in case of failures below.
+
+    int32_t rc = NO_ERROR;
+
+    reprocess_config_t reproc_cfg;
+    cam_dimension_t dim;
+    memset(&reproc_cfg, 0, sizeof(reprocess_config_t));
+    //make sure to set the correct input stream dim in case of YUV size override
+    //and recalculate the plane info
+    dim.width = (int32_t)mYuvWidth;
+    dim.height = (int32_t)mYuvHeight;
+    setReprocConfig(reproc_cfg, pInputBuffer, metadata, mStreamFormat, dim);
+
+    // Picture stream has already been started before any request comes in
+    if (!m_bIsActive) {
+        ALOGE("%s: Channel not started!!", __func__);
+        return NO_INIT;
+    }
+
+    int index = mMemory.getMatchBufIndex((void*)buffer);
+
+    if(index < 0) {
+        rc = registerBuffer(buffer, mIsType);
+        if (NO_ERROR != rc) {
+            ALOGE("%s: On-the-fly buffer registration failed %d",
+                    __func__, rc);
+            return rc;
+        }
+
+        index = mMemory.getMatchBufIndex((void*)buffer);
+        if (index < 0) {
+            ALOGE("%s: Could not find object among registered buffers",__func__);
+            return DEAD_OBJECT;
+        }
+    }
+    CDBG("%s: buffer index %d, frameNumber: %u", __func__, index, frameNumber);
+
+    rc = mMemory.markFrameNumber((uint32_t)index, frameNumber);
+
+    // Start postprocessor
+    startPostProc(reproc_cfg);
+
+    // Queue jpeg settings
+    rc = queueJpegSetting((uint32_t)index, metadata);
+
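+    // No input buffer means a regular snapshot request: queue an internal
+    // YUV buffer to the snapshot stream. With an input buffer this is a
+    // framework reprocess request, handed straight to the postprocessor.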
+    if (pInputBuffer == NULL) {
+        Mutex::Autolock lock(mFreeBuffersLock);
+        uint32_t bufIdx;
+        if (mFreeBufferList.empty()) {
+            rc = mYuvMemory->allocateOne(mFrameLen);
+            if (rc < 0) {
+                ALOGE("%s: Failed to allocate heap buffer. Fatal", __func__);
+                return rc;
+            } else {
+                bufIdx = (uint32_t)rc;
+            }
+        } else {
+            List<uint32_t>::iterator it = mFreeBufferList.begin();
+            bufIdx = *it;
+            mFreeBufferList.erase(it);
+        }
+        mYuvMemory->markFrameNumber(bufIdx, frameNumber);
+        mStreams[0]->bufDone(bufIdx);
+    } else {
+        qcamera_fwk_input_pp_data_t *src_frame = NULL;
+        src_frame = (qcamera_fwk_input_pp_data_t *)calloc(1,
+                sizeof(qcamera_fwk_input_pp_data_t));
+        if (src_frame == NULL) {
+            ALOGE("%s: No memory for src frame", __func__);
+            return NO_MEMORY;
+        }
+        rc = setFwkInputPPData(src_frame, pInputBuffer, &reproc_cfg, metadata,
+                NULL /*fwk output buffer*/, frameNumber);
+        if (NO_ERROR != rc) {
+            ALOGE("%s: Error %d while setting framework input PP data", __func__, rc);
+            free(src_frame);
+            return rc;
+        }
+        CDBG_HIGH("%s: Post-process started", __func__);
+        CDBG_HIGH("%s: Issue call to reprocess", __func__);
+        m_postprocessor.processData(src_frame);
+    }
+    return rc;
+}
+
+
+/*===========================================================================
+ * FUNCTION   : dataNotifyCB
+ *
+ * DESCRIPTION: Channel Level callback used for super buffer data notify.
+ *              This function is registered with mm-camera-interface to handle
+ *              data notify
+ *
+ * PARAMETERS :
+ *   @recvd_frame   : stream frame received
+ *   userdata       : user data ptr
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera3PicChannel::dataNotifyCB(mm_camera_super_buf_t *recvd_frame,
+                                 void *userdata)
+{
+    ATRACE_CALL();
+    CDBG("%s: E\n", __func__);
+    QCamera3PicChannel *channel = (QCamera3PicChannel *)userdata;
+
+    if (channel == NULL) {
+        ALOGE("%s: invalid channel pointer", __func__);
+        return;
+    }
+
+    if(channel->m_numStreams != 1) {
+        ALOGE("%s: Error: Bug: This callback assumes one stream per channel",__func__);
+        return;
+    }
+
+
+    if(channel->mStreams[0] == NULL) {
+        ALOGE("%s: Error: Invalid Stream object",__func__);
+        return;
+    }
+
+    channel->QCamera3PicChannel::streamCbRoutine(recvd_frame, channel->mStreams[0]);
+
+    CDBG("%s: X\n", __func__);
+    return;
+}
+
+/*===========================================================================
+ * FUNCTION   : streamCbRoutine
+ *
+ * DESCRIPTION: Callback for filled YUV snapshot buffers. Hands the frame to
+ *              the postprocessor; the JPEG result is delivered separately
+ *              through jpegEvtHandle.
+ *
+ * PARAMETERS :
+ * @super_frame : the super frame with filled buffer
+ * @stream      : stream on which the buffer was requested and filled
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera3PicChannel::streamCbRoutine(mm_camera_super_buf_t *super_frame,
+                            QCamera3Stream *stream)
+{
+    ATRACE_CALL();
+    //TODO
+    //Used only for getting YUV. Jpeg callback will be sent back from channel
+    //directly to HWI. Refer to func jpegEvtHandle
+
+    //Got the yuv callback. Calling yuv callback handler in PostProc
+    uint8_t frameIndex;
+    mm_camera_super_buf_t* frame = NULL;
+
+    if (checkStreamCbErrors(super_frame, stream) != NO_ERROR) {
+        ALOGE("%s: Error with the stream callback", __func__);
+        return;
+    }
+
+    frameIndex = (uint8_t)super_frame->bufs[0]->buf_idx;
+    CDBG("%s: recvd buf_idx: %u for further processing",
+        __func__, (uint32_t)frameIndex);
+    if(frameIndex >= mNumSnapshotBufs) {
+         ALOGE("%s: Error, Invalid index for buffer",__func__);
+         if(stream) {
+             Mutex::Autolock lock(mFreeBuffersLock);
+             mFreeBufferList.push_back(frameIndex);
+             stream->bufDone(frameIndex);
+         }
+         return;
+    }
+
+    frame = (mm_camera_super_buf_t *)malloc(sizeof(mm_camera_super_buf_t));
+    if (frame == NULL) {
+       ALOGE("%s: Error allocating memory to save received_frame structure.",
+                                                                    __func__);
+       if(stream) {
+           Mutex::Autolock lock(mFreeBuffersLock);
+           mFreeBufferList.push_back(frameIndex);
+           stream->bufDone(frameIndex);
+       }
+       return;
+    }
+    *frame = *super_frame;
+
+    if (mYUVDump) {
+        cam_dimension_t dim;
+        memset(&dim, 0, sizeof(dim));
+        stream->getFrameDimension(dim);
+        cam_frame_len_offset_t offset;
+        memset(&offset, 0, sizeof(cam_frame_len_offset_t));
+        stream->getFrameOffset(offset);
+        dumpYUV(frame->bufs[0], dim, offset, 1);
+    }
+
+    m_postprocessor.processData(frame);
+    free(super_frame);
+    return;
+}
+
+QCamera3StreamMem* QCamera3PicChannel::getStreamBufs(uint32_t len)
+{
+    int rc = 0;
+
+    mYuvMemory = new QCamera3StreamMem(mCamera3Stream->max_buffers, false);
+    if (!mYuvMemory) {
+        ALOGE("%s: unable to create metadata memory", __func__);
+        return NULL;
+    }
+    mFrameLen = len;
+
+    return mYuvMemory;
+}
+
+void QCamera3PicChannel::putStreamBufs()
+{
+    QCamera3ProcessingChannel::putStreamBufs();
+
+    mYuvMemory->deallocate();
+    delete mYuvMemory;
+    mYuvMemory = NULL;
+    mFreeBufferList.clear();
+}
+
+int32_t QCamera3PicChannel::queueJpegSetting(uint32_t index, metadata_buffer_t *metadata)
+{
+    QCamera3HardwareInterface* hal_obj = (QCamera3HardwareInterface*)mUserData;
+    jpeg_settings_t *settings =
+            (jpeg_settings_t *)malloc(sizeof(jpeg_settings_t));
+
+    if (!settings) {
+        ALOGE("%s: out of memory allocating jpeg_settings", __func__);
+        return -ENOMEM;
+    }
+
+    memset(settings, 0, sizeof(jpeg_settings_t));
+
+    settings->out_buf_index = index;
+
+    settings->jpeg_orientation = 0;
+    IF_META_AVAILABLE(int32_t, orientation, CAM_INTF_META_JPEG_ORIENTATION, metadata) {
+        settings->jpeg_orientation = *orientation;
+    }
+
+    settings->jpeg_quality = 85;
+    IF_META_AVAILABLE(uint32_t, quality1, CAM_INTF_META_JPEG_QUALITY, metadata) {
+        settings->jpeg_quality = (uint8_t) *quality1;
+    }
+
+    IF_META_AVAILABLE(uint32_t, quality2, CAM_INTF_META_JPEG_THUMB_QUALITY, metadata) {
+        settings->jpeg_thumb_quality = (uint8_t) *quality2;
+    }
+
+    IF_META_AVAILABLE(cam_dimension_t, dimension, CAM_INTF_META_JPEG_THUMB_SIZE, metadata) {
+        settings->thumbnail_size = *dimension;
+    }
+
+    settings->gps_timestamp_valid = 0;
+    IF_META_AVAILABLE(int64_t, timestamp, CAM_INTF_META_JPEG_GPS_TIMESTAMP, metadata) {
+        settings->gps_timestamp = *timestamp;
+        settings->gps_timestamp_valid = 1;
+    }
+
+    settings->gps_coordinates_valid = 0;
+    IF_META_AVAILABLE(double, coordinates, CAM_INTF_META_JPEG_GPS_COORDINATES, metadata) {
+        memcpy(settings->gps_coordinates, coordinates, 3*sizeof(double));
+        settings->gps_coordinates_valid = 1;
+    }
+
+    IF_META_AVAILABLE(uint8_t, proc_methods, CAM_INTF_META_JPEG_GPS_PROC_METHODS, metadata) {
+        memset(settings->gps_processing_method, 0,
+                sizeof(settings->gps_processing_method));
+        strlcpy(settings->gps_processing_method, (const char *)proc_methods,
+                sizeof(settings->gps_processing_method));
+    }
+
+    // Image description
+    const char *eepromVersion = hal_obj->getEepromVersionInfo();
+    const uint32_t *ldafCalib = hal_obj->getLdafCalib();
+    if ((eepromVersion && strlen(eepromVersion)) ||
+            ldafCalib) {
+        int len = 0;
+        settings->image_desc_valid = true;
+        if (eepromVersion && strlen(eepromVersion)) {
+            len = snprintf(settings->image_desc, sizeof(settings->image_desc),
+                    "M:%s ", eepromVersion);
+        }
+        if (ldafCalib) {
+            snprintf(settings->image_desc + len,
+                    sizeof(settings->image_desc) - len, "L:%u-%u",
+                    ldafCalib[0], ldafCalib[1]);
+        }
+    }
+
+    return m_postprocessor.processJpegSettingData(settings);
+}
+
+/*===========================================================================
+ * FUNCTION   : overrideYuvSize
+ *
+ * DESCRIPTION: override the YUV stream dimensions of the picture channel
+ *
+ * PARAMETERS :
+ *   @width     : new width
+ *   @height    : new height
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera3PicChannel::overrideYuvSize(uint32_t width, uint32_t height)
+{
+    mYuvWidth = width;
+    mYuvHeight = height;
+}
+
+/*===========================================================================
+ * FUNCTION   : getReprocessType
+ *
+ * DESCRIPTION: get the type of reprocess output supported by this channel
+ *
+ * PARAMETERS : NONE
+ *
+ * RETURN     : reprocess_type_t : type of reprocess
+ *==========================================================================*/
+reprocess_type_t QCamera3PicChannel::getReprocessType()
+{
+    /* a picture channel could either use the postprocessor for reprocess+jpeg
+       or only for reprocess */
+    reprocess_type_t expectedReprocess;
+    if (mPostProcMask == CAM_QCOM_FEATURE_NONE || mInputBufferHint) {
+        expectedReprocess = REPROCESS_TYPE_JPEG;
+    } else {
+        expectedReprocess = REPROCESS_TYPE_NONE;
+    }
+    CDBG_HIGH("%s: expectedReprocess from Pic Channel is %d", __func__, expectedReprocess);
+    return expectedReprocess;
+}
+
+/* Reprocess Channel methods */
+
+/*===========================================================================
+ * FUNCTION   : QCamera3ReprocessChannel
+ *
+ * DESCRIPTION: constructor of QCamera3ReprocessChannel
+ *
+ * PARAMETERS :
+ *   @cam_handle : camera handle
+ *   @cam_ops    : ptr to camera ops table
+ *   @pp_mask    : post-process feature mask
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCamera3ReprocessChannel::QCamera3ReprocessChannel(uint32_t cam_handle,
+                                                 uint32_t channel_handle,
+                                                 mm_camera_ops_t *cam_ops,
+                                                 channel_cb_routine cb_routine,
+                                                 cam_padding_info_t *paddingInfo,
+                                                 uint32_t postprocess_mask,
+                                                 void *userData, void *ch_hdl) :
+    /* In case of framework reprocessing, pproc and jpeg operations could be
+     * parallelized by allowing 1 extra buffer for reprocessing output:
+     * ch_hdl->getNumBuffers() + 1 */
+    QCamera3Channel(cam_handle, channel_handle, cam_ops, cb_routine, paddingInfo,
+                    postprocess_mask, userData,
+                    ((QCamera3ProcessingChannel *)ch_hdl)->getNumBuffers()
+                              + (MAX_REPROCESS_PIPELINE_STAGES - 1)),
+    inputChHandle(ch_hdl),
+    mOfflineBuffersIndex(-1),
+    mFrameLen(0),
+    mReprocessType(REPROCESS_TYPE_NONE),
+    m_pSrcChannel(NULL),
+    m_pMetaChannel(NULL),
+    mMemory(NULL),
+    mGrallocMemory(0)
+{
+    memset(mSrcStreamHandles, 0, sizeof(mSrcStreamHandles));
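+    // Offline input buffers use indices [0, mNumBuffers - 1] and offline
+    // meta buffers use [mNumBuffers, 2 * mNumBuffers - 1]; starting both
+    // indices at the end of their range makes the first doReprocessOffline()
+    // call wrap around to the first slot.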
+    mOfflineBuffersIndex = mNumBuffers -1;
+    mOfflineMetaIndex = (int32_t) (2*mNumBuffers -1);
+}
+
+
+/*===========================================================================
+ * FUNCTION   : initialize
+ *
+ * DESCRIPTION: initialize the reprocess channel by adding a channel to the
+ *              camera backend
+ *
+ * PARAMETERS :
+ *   @isType : image stabilization type on the stream
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ReprocessChannel::initialize(cam_is_type_t isType)
+{
+    int32_t rc = NO_ERROR;
+    mm_camera_channel_attr_t attr;
+
+    memset(&attr, 0, sizeof(mm_camera_channel_attr_t));
+    attr.notify_mode = MM_CAMERA_SUPER_BUF_NOTIFY_CONTINUOUS;
+    attr.max_unmatched_frames = 1;
+
+    m_handle = m_camOps->add_channel(m_camHandle,
+                                      &attr,
+                                      NULL,
+                                      this);
+    if (m_handle == 0) {
+        ALOGE("%s: Add channel failed", __func__);
+        return UNKNOWN_ERROR;
+    }
+
+    mIsType = isType;
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : registerBuffer
+ *
+ * DESCRIPTION: register streaming buffer to the channel object
+ *
+ * PARAMETERS :
+ *   @buffer     : buffer to be registered
+ *   @isType     : the image stabilization type for the buffer
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ReprocessChannel::registerBuffer(buffer_handle_t *buffer,
+        cam_is_type_t isType)
+{
+    ATRACE_CALL();
+    int rc = 0;
+    mIsType = isType;
+    cam_stream_type_t streamType;
+
+    if (buffer == NULL) {
+        ALOGE("%s: Error: Cannot register a NULL buffer", __func__);
+        return BAD_VALUE;
+    }
+
+    if ((uint32_t)mGrallocMemory.getCnt() > (mNumBuffers - 1)) {
+        ALOGE("%s: Trying to register more buffers than initially requested",
+                __func__);
+        return BAD_VALUE;
+    }
+
+    if (0 == m_numStreams) {
+        rc = initialize(mIsType);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: Couldn't initialize camera stream %d",
+                    __func__, rc);
+            return rc;
+        }
+    }
+
+    streamType = mStreams[0]->getMyType();
+    rc = mGrallocMemory.registerBuffer(buffer, streamType);
+    if (ALREADY_EXISTS == rc) {
+        return NO_ERROR;
+    } else if (NO_ERROR != rc) {
+        ALOGE("%s: Buffer %p couldn't be registered %d", __func__, buffer, rc);
+        return rc;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : streamCbRoutine
+ *
+ * DESCRIPTION: callback for filled reprocess stream buffers. For JPEG
+ *              reprocessing the output is forwarded to the postprocessor for
+ *              encoding; otherwise the output buffer is returned to the
+ *              framework through the parent channel.
+ *
+ * PARAMETERS :
+ *   @super_frame : the super frame with filled buffer
+ *   @stream      : stream on which the buffer was requested and filled
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera3ReprocessChannel::streamCbRoutine(mm_camera_super_buf_t *super_frame,
+                                  QCamera3Stream *stream)
+{
+    //Got the pproc data callback. Now send to jpeg encoding
+    uint8_t frameIndex;
+    uint32_t resultFrameNumber;
+    mm_camera_super_buf_t* frame = NULL;
+    QCamera3ProcessingChannel *obj = (QCamera3ProcessingChannel *)inputChHandle;
+
+    if(!super_frame) {
+         ALOGE("%s: Invalid Super buffer",__func__);
+         return;
+    }
+
+    if(super_frame->num_bufs != 1) {
+         ALOGE("%s: Multiple streams are not supported",__func__);
+         return;
+    }
+    if(super_frame->bufs[0] == NULL ) {
+         ALOGE("%s: Error, Super buffer frame does not contain valid buffer",
+                  __func__);
+         return;
+    }
+    frameIndex = (uint8_t)super_frame->bufs[0]->buf_idx;
+
+    if (mYUVDump) {
+        cam_dimension_t dim;
+        memset(&dim, 0, sizeof(dim));
+        stream->getFrameDimension(dim);
+        cam_frame_len_offset_t offset;
+        memset(&offset, 0, sizeof(cam_frame_len_offset_t));
+        stream->getFrameOffset(offset);
+        dumpYUV(super_frame->bufs[0], dim, offset, 2);
+    }
+
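+    // For JPEG reprocess the postproc output goes to the JPEG encoder;
+    // otherwise the gralloc output buffer is returned to the parent
+    // channel and, from there, to the framework.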
+    if (mReprocessType == REPROCESS_TYPE_JPEG) {
+        resultFrameNumber =  mMemory->getFrameNumber(frameIndex);
+        frame = (mm_camera_super_buf_t *)malloc(sizeof(mm_camera_super_buf_t));
+        if (frame == NULL) {
+           ALOGE("%s: Error allocating memory to save received_frame structure.",
+                                                                        __func__);
+           if(stream) {
+               stream->bufDone(frameIndex);
+           }
+           return;
+        }
+        CDBG("%s: bufIndex: %u recvd from post proc",
+            __func__, (uint32_t)frameIndex);
+        *frame = *super_frame;
+
+        /* Since reprocessing is done, send the callback to release the input buffer */
+        if (mChannelCB) {
+            mChannelCB(NULL, NULL, resultFrameNumber, true, mUserData);
+        }
+        obj->m_postprocessor.processPPData(frame);
+    } else {
+        buffer_handle_t *resultBuffer;
+        frameIndex = (uint8_t)super_frame->bufs[0]->buf_idx;
+        resultBuffer = (buffer_handle_t *)mGrallocMemory.getBufferHandle(frameIndex);
+        resultFrameNumber = mGrallocMemory.getFrameNumber(frameIndex);
+        int32_t rc = stream->bufRelease(frameIndex);
+        if (NO_ERROR != rc) {
+            ALOGE("%s: Error %d releasing stream buffer %d",
+                    __func__, rc, frameIndex);
+        }
+        rc = mGrallocMemory.unregisterBuffer(frameIndex);
+        if (NO_ERROR != rc) {
+            ALOGE("%s: Error %d unregistering stream buffer %d",
+                    __func__, rc, frameIndex);
+        }
+        obj->reprocessCbRoutine(resultBuffer, resultFrameNumber);
+
+        obj->m_postprocessor.releaseOfflineBuffers();
+        qcamera_hal3_pp_data_t *pp_job = obj->m_postprocessor.dequeuePPJob(resultFrameNumber);
+        if (pp_job != NULL) {
+            obj->m_postprocessor.releasePPJobData(pp_job);
+        }
+        free(pp_job);
+    }
+    free(super_frame);
+    return;
+}
+
+/*===========================================================================
+ * FUNCTION   : getStreamBufs
+ *
+ * DESCRIPTION: get the stream buffers used by the reprocess channel
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : QCamera3StreamMem *
+ *==========================================================================*/
+QCamera3StreamMem* QCamera3ReprocessChannel::getStreamBufs(uint32_t len)
+{
+    int rc = 0;
+    if (mReprocessType == REPROCESS_TYPE_JPEG) {
+        mMemory = new QCamera3StreamMem(mNumBuffers, false);
+        if (!mMemory) {
+            ALOGE("%s: unable to create reproc memory", __func__);
+            return NULL;
+        }
+        mFrameLen = len;
+        return mMemory;
+    }
+    return &mGrallocMemory;
+}
+
+/*===========================================================================
+ * FUNCTION   : putStreamBufs
+ *
+ * DESCRIPTION: release the reprocess channel buffers
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera3ReprocessChannel::putStreamBufs()
+{
+   if (mReprocessType == REPROCESS_TYPE_JPEG) {
+       mMemory->deallocate();
+       delete mMemory;
+       mMemory = NULL;
+       mFreeBufferList.clear();
+   } else {
+       mGrallocMemory.unregisterBuffers();
+   }
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCamera3ReprocessChannel
+ *
+ * DESCRIPTION: destructor of QCamera3ReprocessChannel
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCamera3ReprocessChannel::~QCamera3ReprocessChannel()
+{
+    if (m_bIsActive)
+        stop();
+
+    for (uint32_t i = 0; i < m_numStreams; i++) {
+        if (mStreams[i] != NULL) {
+            delete mStreams[i];
+            mStreams[i] = 0;
+        }
+    }
+    if (m_handle) {
+        m_camOps->delete_channel(m_camHandle, m_handle);
+        ALOGE("%s: deleting channel %d", __func__, m_handle);
+        m_handle = 0;
+    }
+    m_numStreams = 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : start
+ *
+ * DESCRIPTION: start reprocess channel.
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ReprocessChannel::start()
+{
+    ATRACE_CALL();
+    int32_t rc = NO_ERROR;
+
+    rc = QCamera3Channel::start();
+
+    if (rc == NO_ERROR) {
+       rc = m_camOps->start_channel(m_camHandle, m_handle);
+
+       // Check failure
+       if (rc != NO_ERROR) {
+           ALOGE("%s: start_channel failed %d", __func__, rc);
+           QCamera3Channel::stop();
+       }
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : stop
+ *
+ * DESCRIPTION: stop reprocess channel.
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ReprocessChannel::stop()
+{
+    ATRACE_CALL();
+    int32_t rc = NO_ERROR;
+
+    rc = QCamera3Channel::stop();
+
+    rc |= m_camOps->stop_channel(m_camHandle, m_handle);
+
+    unmapOfflineBuffers(true);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : getStreamBySrcHandle
+ *
+ * DESCRIPTION: find reprocess stream by its source stream handle
+ *
+ * PARAMETERS :
+ *   @srcHandle : source stream handle
+ *
+ * RETURN     : ptr to reprocess stream if found. NULL if not found
+ *==========================================================================*/
+QCamera3Stream * QCamera3ReprocessChannel::getStreamBySrcHandle(uint32_t srcHandle)
+{
+    QCamera3Stream *pStream = NULL;
+
+    for (uint32_t i = 0; i < m_numStreams; i++) {
+        if (mSrcStreamHandles[i] == srcHandle) {
+            pStream = mStreams[i];
+            break;
+        }
+    }
+    return pStream;
+}
+
+/*===========================================================================
+ * FUNCTION   : getSrcStreamBySrcHandle
+ *
+ * DESCRIPTION: find source stream by source stream handle
+ *
+ * PARAMETERS :
+ *   @srcHandle : source stream handle
+ *
+ * RETURN     : ptr to source stream if found. NULL if not found
+ *==========================================================================*/
+QCamera3Stream * QCamera3ReprocessChannel::getSrcStreamBySrcHandle(uint32_t srcHandle)
+{
+    QCamera3Stream *pStream = NULL;
+
+    if (NULL == m_pSrcChannel) {
+        return NULL;
+    }
+
+    for (uint32_t i = 0; i < m_numStreams; i++) {
+        if (mSrcStreamHandles[i] == srcHandle) {
+            pStream = m_pSrcChannel->getStreamByIndex(i);
+            break;
+        }
+    }
+    return pStream;
+}
+
+/*===========================================================================
+ * FUNCTION   : unmapOfflineBuffers
+ *
+ * DESCRIPTION: Unmaps offline buffers
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ReprocessChannel::unmapOfflineBuffers(bool all)
+{
+    int rc = NO_ERROR;
+    if (!mOfflineBuffers.empty()) {
+        QCamera3Stream *stream = NULL;
+        List<OfflineBuffer>::iterator it = mOfflineBuffers.begin();
+        for (; it != mOfflineBuffers.end(); it++) {
+           stream = (*it).stream;
+           if (NULL != stream) {
+               rc = stream->unmapBuf((*it).type,
+                                     (*it).index,
+                                        -1);
+               if (NO_ERROR != rc) {
+                   ALOGE("%s: Error during offline buffer unmap %d",
+                         __func__, rc);
+               }
+               CDBG("%s: Unmapped buffer with index %d", __func__, (*it).index);
+           }
+           if (!all) {
+               mOfflineBuffers.erase(it);
+               break;
+           }
+        }
+        if (all) {
+           mOfflineBuffers.clear();
+        }
+    }
+
+    if (!mOfflineMetaBuffers.empty()) {
+        QCamera3Stream *stream = NULL;
+        List<OfflineBuffer>::iterator it = mOfflineMetaBuffers.begin();
+        for (; it != mOfflineMetaBuffers.end(); it++) {
+           stream = (*it).stream;
+           if (NULL != stream) {
+               rc = stream->unmapBuf((*it).type,
+                                     (*it).index,
+                                        -1);
+               if (NO_ERROR != rc) {
+                   ALOGE("%s: Error during offline buffer unmap %d",
+                         __func__, rc);
+               }
+               CDBG("%s: Unmapped meta buffer with index %d", __func__, (*it).index);
+           }
+           if (!all) {
+               mOfflineMetaBuffers.erase(it);
+               break;
+           }
+        }
+        if (all) {
+           mOfflineMetaBuffers.clear();
+        }
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : bufDone
+ *
+ * DESCRIPTION: Return reprocess stream buffer to free buffer list.
+ *              Note that this function doesn't queue buffer back to kernel.
+ *              It's up to doReprocessOffline to do that instead.
+ * PARAMETERS :
+ *   @recvd_frame  : stream buf frame to be returned
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ReprocessChannel::bufDone(mm_camera_super_buf_t *recvd_frame)
+{
+    int rc = NO_ERROR;
+    if (recvd_frame && recvd_frame->num_bufs == 1) {
+        Mutex::Autolock lock(mFreeBuffersLock);
+        uint32_t buf_idx = recvd_frame->bufs[0]->buf_idx;
+        mFreeBufferList.push_back(buf_idx);
+
+    } else {
+        ALOGE("%s: Fatal. Not supposed to be here", __func__);
+        rc = BAD_VALUE;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : overrideMetadata
+ *
+ * DESCRIPTION: Override metadata entry such as rotation, crop, and CDS info.
+ *
+ * PARAMETERS :
+ *   @pp_buffer     : input/output buffer pair from the source stream
+ *   @meta_buffer   : metadata buffer corresponding to the input frame
+ *   @jpeg_settings : jpeg settings for the reprocess request
+ *   @fwk_frame     : framework frame to be filled in for reprocessing
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ReprocessChannel::overrideMetadata(qcamera_hal3_pp_buffer_t *pp_buffer,
+        mm_camera_buf_def_t *meta_buffer, jpeg_settings_t *jpeg_settings,
+        qcamera_fwk_input_pp_data_t &fwk_frame)
+{
+    int32_t rc = NO_ERROR;
+    QCamera3HardwareInterface* hal_obj = (QCamera3HardwareInterface*)mUserData;
+    if ((NULL == meta_buffer) || (NULL == pp_buffer) || (NULL == pp_buffer->input) ||
+            (NULL == hal_obj)) {
+        return BAD_VALUE;
+    }
+
+    metadata_buffer_t *meta = (metadata_buffer_t *)meta_buffer->buffer;
+    mm_camera_super_buf_t *frame = pp_buffer->input;
+    if (NULL == meta) {
+        return BAD_VALUE;
+    }
+
+    for (uint32_t i = 0; i < frame->num_bufs; i++) {
+        QCamera3Stream *pStream = getStreamBySrcHandle(frame->bufs[i]->stream_id);
+        QCamera3Stream *pSrcStream = getSrcStreamBySrcHandle(frame->bufs[i]->stream_id);
+
+        if (pStream != NULL && pSrcStream != NULL) {
+            if (jpeg_settings) {
+                // Find rotation info for reprocess stream
+                cam_rotation_info_t rotation_info;
+                memset(&rotation_info, 0, sizeof(rotation_info));
+                if (jpeg_settings->jpeg_orientation == 0) {
+                   rotation_info.rotation = ROTATE_0;
+                } else if (jpeg_settings->jpeg_orientation == 90) {
+                   rotation_info.rotation = ROTATE_90;
+                } else if (jpeg_settings->jpeg_orientation == 180) {
+                   rotation_info.rotation = ROTATE_180;
+                } else if (jpeg_settings->jpeg_orientation == 270) {
+                   rotation_info.rotation = ROTATE_270;
+                }
+                rotation_info.streamId = mStreams[0]->getMyServerID();
+                ADD_SET_PARAM_ENTRY_TO_BATCH(meta, CAM_INTF_PARM_ROTATION, rotation_info);
+            }
+
+            // Find and insert crop info for reprocess stream
+            IF_META_AVAILABLE(cam_crop_data_t, crop_data, CAM_INTF_META_CROP_DATA, meta) {
+                if (MAX_NUM_STREAMS > crop_data->num_of_streams) {
+                    for (int j = 0; j < crop_data->num_of_streams; j++) {
+                        if (crop_data->crop_info[j].stream_id ==
+                                pSrcStream->getMyServerID()) {
+
+                            // Store crop/roi information for offline reprocess
+                            // in the reprocess stream slot
+                            crop_data->crop_info[crop_data->num_of_streams].crop =
+                                    crop_data->crop_info[j].crop;
+                            crop_data->crop_info[crop_data->num_of_streams].roi_map =
+                                    crop_data->crop_info[j].roi_map;
+                            crop_data->crop_info[crop_data->num_of_streams].stream_id =
+                                    mStreams[0]->getMyServerID();
+                            crop_data->num_of_streams++;
+
+                            CDBG("%s: Reprocess stream server id: %d",
+                                    __func__, mStreams[0]->getMyServerID());
+                            CDBG("%s: Found offline reprocess crop %dx%d %dx%d",
+                                    __func__,
+                                    crop_data->crop_info[j].crop.left,
+                                    crop_data->crop_info[j].crop.top,
+                                    crop_data->crop_info[j].crop.width,
+                                    crop_data->crop_info[j].crop.height);
+                            CDBG("%s: Found offline reprocess roimap %dx%d %dx%d",
+                                    __func__,
+                                    crop_data->crop_info[j].roi_map.left,
+                                    crop_data->crop_info[j].roi_map.top,
+                                    crop_data->crop_info[j].roi_map.width,
+                                    crop_data->crop_info[j].roi_map.height);
+
+                            break;
+                        }
+                    }
+                } else {
+                    ALOGE("%s: No space to add reprocess stream crop/roi information",
+                            __func__);
+                }
+            }
+
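+            // Collapse the CDS list to a single entry that targets the
+            // reprocess stream, carrying over the source stream's enable flag.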
+            IF_META_AVAILABLE(cam_cds_data_t, cdsInfo, CAM_INTF_META_CDS_DATA, meta) {
+                uint8_t cnt = cdsInfo->num_of_streams;
+                if (cnt <= MAX_NUM_STREAMS) {
+                    cam_stream_cds_info_t repro_cds_info;
+                    memset(&repro_cds_info, 0, sizeof(repro_cds_info));
+                    repro_cds_info.stream_id = mStreams[0]->getMyServerID();
+                    for (size_t i = 0; i < cnt; i++) {
+                        if (cdsInfo->cds_info[i].stream_id ==
+                                pSrcStream->getMyServerID()) {
+                            repro_cds_info.cds_enable =
+                                    cdsInfo->cds_info[i].cds_enable;
+                            break;
+                        }
+                    }
+                    cdsInfo->num_of_streams = 1;
+                    cdsInfo->cds_info[0] = repro_cds_info;
+                } else {
+                    ALOGE("%s: No space to add reprocess stream cds information",
+                            __func__);
+                }
+            }
+
+            fwk_frame.input_buffer = *frame->bufs[i];
+            fwk_frame.metadata_buffer = *meta_buffer;
+            fwk_frame.output_buffer = pp_buffer->output;
+            break;
+        } else {
+            ALOGE("%s: Source/Re-process streams are invalid", __func__);
+            rc |= BAD_VALUE;
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : overrideFwkMetadata
+ *
+ * DESCRIPTION: Override framework metadata such as crop and CDS data.
+ *
+ * PARAMETERS :
+ *   @frame : input frame for reprocessing
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+*==========================================================================*/
+int32_t QCamera3ReprocessChannel::overrideFwkMetadata(
+        qcamera_fwk_input_pp_data_t *frame)
+{
+    if (NULL == frame) {
+        ALOGE("%s: Incorrect input frame", __func__);
+        return BAD_VALUE;
+    }
+
+
+    if (NULL == frame->metadata_buffer.buffer) {
+        ALOGE("%s: No metadata available", __func__);
+        return BAD_VALUE;
+    }
+
+    // Find and insert crop info for reprocess stream
+    metadata_buffer_t *meta = (metadata_buffer_t *) frame->metadata_buffer.buffer;
+    IF_META_AVAILABLE(cam_crop_data_t, crop_data, CAM_INTF_META_CROP_DATA, meta) {
+        if (1 == crop_data->num_of_streams) {
+            // Store crop/roi information for offline reprocess
+            // in the reprocess stream slot
+            crop_data->crop_info[crop_data->num_of_streams].crop =
+                    crop_data->crop_info[0].crop;
+            crop_data->crop_info[crop_data->num_of_streams].roi_map =
+                    crop_data->crop_info[0].roi_map;
+            crop_data->crop_info[crop_data->num_of_streams].stream_id =
+                    mStreams[0]->getMyServerID();
+            crop_data->num_of_streams++;
+
+            CDBG("%s: Reprocess stream server id: %d",
+                    __func__, mStreams[0]->getMyServerID());
+            CDBG("%s: Found offline reprocess crop %dx%d %dx%d", __func__,
+                    crop_data->crop_info[0].crop.left,
+                    crop_data->crop_info[0].crop.top,
+                    crop_data->crop_info[0].crop.width,
+                    crop_data->crop_info[0].crop.height);
+            CDBG("%s: Found offline reprocess roi map %dx%d %dx%d", __func__,
+                    crop_data->crop_info[0].roi_map.left,
+                    crop_data->crop_info[0].roi_map.top,
+                    crop_data->crop_info[0].roi_map.width,
+                    crop_data->crop_info[0].roi_map.height);
+        } else {
+            ALOGE("%s: Incorrect number of offline crop data entries %d",
+                    __func__,
+                    crop_data->num_of_streams);
+            return BAD_VALUE;
+        }
+    } else {
+        CDBG_HIGH("%s: Crop data not present", __func__);
+    }
+
+    IF_META_AVAILABLE(cam_cds_data_t, cdsInfo, CAM_INTF_META_CDS_DATA, meta) {
+        if (1 == cdsInfo->num_of_streams) {
+            cdsInfo->cds_info[0].stream_id = mStreams[0]->getMyServerID();
+        } else {
+            ALOGE("%s: Incorrect number of offline cds info entries %d",
+                    __func__, cdsInfo->num_of_streams);
+            return BAD_VALUE;
+        }
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : doReprocessOffline
+ *
+ * DESCRIPTION: request to do a reprocess on the frame
+ *
+ * PARAMETERS :
+ *   @frame     : input frame for reprocessing
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+ int32_t QCamera3ReprocessChannel::doReprocessOffline(qcamera_fwk_input_pp_data_t *frame)
+{
+    int32_t rc = 0;
+    int index;
+    OfflineBuffer mappedBuffer;
+
+    if (m_numStreams < 1) {
+        ALOGE("%s: No reprocess stream is created", __func__);
+        return -1;
+    }
+
+    if (NULL == frame) {
+        ALOGE("%s: Incorrect input frame", __func__);
+        return BAD_VALUE;
+    }
+
+    if (NULL == frame->metadata_buffer.buffer) {
+        ALOGE("%s: No metadata available", __func__);
+        return BAD_VALUE;
+    }
+
+    if (NULL == frame->input_buffer.buffer) {
+        ALOGE("%s: No input buffer available", __func__);
+        return BAD_VALUE;
+    }
+
+    if ((0 == m_numStreams) || (NULL == mStreams[0])) {
+        ALOGE("%s: Reprocess stream not initialized!", __func__);
+        return NO_INIT;
+    }
+
+    QCamera3Stream *pStream = mStreams[0];
+
+    //qbuf the output buffer if it was allocated by the framework
+    if (mReprocessType != REPROCESS_TYPE_JPEG && frame->output_buffer != NULL) {
+        if(!m_bIsActive) {
+            rc = registerBuffer(frame->output_buffer, mIsType);
+            if (NO_ERROR != rc) {
+                ALOGE("%s: On-the-fly buffer registration failed %d",
+                        __func__, rc);
+                return rc;
+            }
+
+            rc = start();
+            if (NO_ERROR != rc) {
+                return rc;
+            }
+        }
+        index = mGrallocMemory.getMatchBufIndex((void*)frame->output_buffer);
+        if(index < 0) {
+            rc = registerBuffer(frame->output_buffer, mIsType);
+            if (NO_ERROR != rc) {
+                ALOGE("%s: On-the-fly buffer registration failed %d",
+                        __func__, rc);
+                return rc;
+            }
+
+            index = mGrallocMemory.getMatchBufIndex((void*)frame->output_buffer);
+            if (index < 0) {
+                ALOGE("%s: Could not find object among registered buffers",
+                        __func__);
+                return DEAD_OBJECT;
+            }
+        }
+        rc = pStream->bufDone(index);
+        if(rc != NO_ERROR) {
+            ALOGE("%s: Failed to Q new buffer to stream",__func__);
+            return rc;
+        }
+        rc = mGrallocMemory.markFrameNumber(index, frame->frameNumber);
+
+    } else if (mReprocessType == REPROCESS_TYPE_JPEG) {
+        Mutex::Autolock lock(mFreeBuffersLock);
+        uint32_t bufIdx;
+        if (mFreeBufferList.empty()) {
+            rc = mMemory->allocateOne(mFrameLen);
+            if (rc < 0) {
+                ALOGE("%s: Failed allocating heap buffer. Fatal", __func__);
+                return BAD_VALUE;
+            } else {
+                bufIdx = (uint32_t)rc;
+            }
+        } else {
+            bufIdx = *(mFreeBufferList.begin());
+            mFreeBufferList.erase(mFreeBufferList.begin());
+        }
+
+        mMemory->markFrameNumber(bufIdx, frame->frameNumber);
+        rc = pStream->bufDone(bufIdx);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: Failed to queue new buffer to stream", __func__);
+            return rc;
+        }
+    }
+
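+    // Map the framework input and metadata buffers as offline buffers,
+    // cycling the buffer indices through their respective ranges (see the
+    // constructor) once the maximum count is reached.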
+    int32_t max_idx = (int32_t) (mNumBuffers - 1);
+    //loop back the indices if max burst count reached
+    if (mOfflineBuffersIndex == max_idx) {
+       mOfflineBuffersIndex = -1;
+    }
+    uint32_t buf_idx = (uint32_t)(mOfflineBuffersIndex + 1);
+    rc = pStream->mapBuf(
+            CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF,
+            buf_idx, -1,
+            frame->input_buffer.fd, frame->input_buffer.frame_len);
+    if (NO_ERROR == rc) {
+        mappedBuffer.index = buf_idx;
+        mappedBuffer.stream = pStream;
+        mappedBuffer.type = CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF;
+        mOfflineBuffers.push_back(mappedBuffer);
+        mOfflineBuffersIndex = (int32_t)buf_idx;
+        CDBG("%s: Mapped buffer with index %d", __func__, mOfflineBuffersIndex);
+    }
+
+    max_idx = (int32_t) ((mNumBuffers * 2) - 1);
+    //loop back the indices if max burst count reached
+    if (mOfflineMetaIndex == max_idx) {
+       mOfflineMetaIndex = (int32_t) (mNumBuffers - 1);
+    }
+    uint32_t meta_buf_idx = (uint32_t)(mOfflineMetaIndex + 1);
+    rc |= pStream->mapBuf(
+            CAM_MAPPING_BUF_TYPE_OFFLINE_META_BUF,
+            meta_buf_idx, -1,
+            frame->metadata_buffer.fd, frame->metadata_buffer.frame_len);
+    if (NO_ERROR == rc) {
+        mappedBuffer.index = meta_buf_idx;
+        mappedBuffer.stream = pStream;
+        mappedBuffer.type = CAM_MAPPING_BUF_TYPE_OFFLINE_META_BUF;
+        mOfflineMetaBuffers.push_back(mappedBuffer);
+        mOfflineMetaIndex = (int32_t)meta_buf_idx;
+        CDBG("%s: Mapped meta buffer with index %d", __func__, mOfflineMetaIndex);
+    }
+
+    if (rc == NO_ERROR) {
+        cam_stream_parm_buffer_t param;
+        memset(&param, 0, sizeof(cam_stream_parm_buffer_t));
+        param.type = CAM_STREAM_PARAM_TYPE_DO_REPROCESS;
+        param.reprocess.buf_index = buf_idx;
+        param.reprocess.frame_idx = frame->input_buffer.frame_idx;
+        param.reprocess.meta_present = 1;
+        param.reprocess.meta_buf_index = meta_buf_idx;
+        rc = pStream->setParameter(param);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: stream setParameter for reprocess failed", __func__);
+        }
+    } else {
+        ALOGE("%s: Input buffer memory map failed: %d", __func__, rc);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : doReprocess
+ *
+ * DESCRIPTION: request to do a reprocess on the frame
+ *
+ * PARAMETERS :
+ *   @buf_fd     : fd to the input buffer that needs reprocess
+ *   @buf_length : length of the input buffer
+ *   @ret_val    : result of reprocess.
+ *                 Example: Could be faceID in case of register face image.
+ *   @meta_frame : metadata frame.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ReprocessChannel::doReprocess(int buf_fd, size_t buf_length,
+        int32_t &ret_val, mm_camera_super_buf_t *meta_frame)
+{
+    int32_t rc = 0;
+    if (m_numStreams < 1) {
+        ALOGE("%s: No reprocess stream is created", __func__);
+        return -1;
+    }
+    if (meta_frame == NULL) {
+        ALOGE("%s: Did not get corresponding metadata in time", __func__);
+        return -1;
+    }
+
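+    // Map the input fd as an offline buffer on each reprocess stream, kick
+    // off reprocessing via a DO_REPROCESS stream parameter that references
+    // the metadata buffer, then unmap the input once the call returns.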
+    uint8_t buf_idx = 0;
+    for (uint32_t i = 0; i < m_numStreams; i++) {
+        rc = mStreams[i]->mapBuf(CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF,
+                                 buf_idx, -1,
+                                 buf_fd, buf_length);
+
+        if (rc == NO_ERROR) {
+            cam_stream_parm_buffer_t param;
+            memset(&param, 0, sizeof(cam_stream_parm_buffer_t));
+            param.type = CAM_STREAM_PARAM_TYPE_DO_REPROCESS;
+            param.reprocess.buf_index = buf_idx;
+            param.reprocess.meta_present = 1;
+            param.reprocess.meta_stream_handle = m_pMetaChannel->mStreams[0]->getMyServerID();
+            param.reprocess.meta_buf_index = meta_frame->bufs[0]->buf_idx;
+            rc = mStreams[i]->setParameter(param);
+            if (rc == NO_ERROR) {
+                ret_val = param.reprocess.ret_val;
+            }
+            mStreams[i]->unmapBuf(CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF,
+                                  buf_idx, -1);
+        }
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : addReprocStreamsFromSource
+ *
+ * DESCRIPTION: add reprocess streams from input source channel
+ *
+ * PARAMETERS :
+ *   @config         : pp feature configuration
+ *   @src_config     : source reprocess configuration
+ *   @isType         : type of image stabilization required on this stream
+ *   @pMetaChannel   : ptr to metadata channel to get corresp. metadata
+ *
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3ReprocessChannel::addReprocStreamsFromSource(cam_pp_feature_config_t &pp_config,
+        const reprocess_config_t &src_config , cam_is_type_t is_type,
+        QCamera3Channel *pMetaChannel)
+{
+    int32_t rc = 0;
+    cam_stream_reproc_config_t reprocess_config;
+    cam_stream_type_t streamType;
+
+    cam_dimension_t streamDim = src_config.output_stream_dim;
+
+    if (NULL != src_config.src_channel) {
+        QCamera3Stream *pSrcStream = src_config.src_channel->getStreamByIndex(0);
+        if (pSrcStream == NULL) {
+           ALOGE("%s: source channel doesn't have a stream", __func__);
+           return BAD_VALUE;
+        }
+        mSrcStreamHandles[m_numStreams] = pSrcStream->getMyHandle();
+    }
+
+    streamType = CAM_STREAM_TYPE_OFFLINE_PROC;
+    reprocess_config.pp_type = CAM_OFFLINE_REPROCESS_TYPE;
+
+    reprocess_config.offline.input_fmt = src_config.stream_format;
+    reprocess_config.offline.input_dim = src_config.input_stream_dim;
+    reprocess_config.offline.input_buf_planes.plane_info =
+            src_config.input_stream_plane_info.plane_info;
+    reprocess_config.offline.num_of_bufs = (uint8_t)mNumBuffers;
+    reprocess_config.offline.input_type = src_config.stream_type;
+
+    reprocess_config.pp_feature_config = pp_config;
+    QCamera3Stream *pStream = new QCamera3Stream(m_camHandle,
+            m_handle,
+            m_camOps,
+            mPaddingInfo,
+            (QCamera3Channel*)this);
+    if (pStream == NULL) {
+        ALOGE("%s: No mem for Stream", __func__);
+        return NO_MEMORY;
+    }
+
+    rc = pStream->init(streamType, src_config.stream_format,
+            streamDim, ROTATE_0, &reprocess_config,
+            (uint8_t)mNumBuffers,
+            reprocess_config.pp_feature_config.feature_mask,
+            is_type,
+            0,/* batchSize */
+            QCamera3Channel::streamCbRoutine, this);
+
+    if (rc == 0) {
+        mStreams[m_numStreams] = pStream;
+        m_numStreams++;
+    } else {
+        ALOGE("%s: failed to create reprocess stream", __func__);
+        delete pStream;
+    }
+
+    if (rc == NO_ERROR) {
+        m_pSrcChannel = src_config.src_channel;
+        m_pMetaChannel = pMetaChannel;
+        mReprocessType = src_config.reprocess_type;
+        CDBG("%s: mReprocessType is %d", __func__, mReprocessType);
+    }
+    if(m_camOps->request_super_buf(m_camHandle,m_handle,1,0) < 0) {
+        ALOGE("%s: Request for super buffer failed",__func__);
+    }
+    return rc;
+}
+
+/* QCamera3SupportChannel methods */
+
+cam_dimension_t QCamera3SupportChannel::kDim = {640, 480};
+
+QCamera3SupportChannel::QCamera3SupportChannel(uint32_t cam_handle,
+                    uint32_t channel_handle,
+                    mm_camera_ops_t *cam_ops,
+                    cam_padding_info_t *paddingInfo,
+                    uint32_t postprocess_mask,
+                    cam_stream_type_t streamType,
+                    cam_dimension_t *dim,
+                    cam_format_t streamFormat,
+                    void *userData, uint32_t numBuffers) :
+                        QCamera3Channel(cam_handle, channel_handle, cam_ops,
+                                NULL, paddingInfo, postprocess_mask,
+                                userData, numBuffers),
+                        mMemory(NULL)
+{
+    memcpy(&mDim, dim, sizeof(cam_dimension_t));
+    mStreamType = streamType;
+    mStreamFormat = streamFormat;
+}
+
+QCamera3SupportChannel::~QCamera3SupportChannel()
+{
+    if (m_bIsActive)
+        stop();
+
+    if (mMemory) {
+        mMemory->deallocate();
+        delete mMemory;
+        mMemory = NULL;
+    }
+}
+
+int32_t QCamera3SupportChannel::initialize(cam_is_type_t isType)
+{
+    int32_t rc;
+
+    if (mMemory || m_numStreams > 0) {
+        ALOGE("%s: metadata channel already initialized", __func__);
+        return -EINVAL;
+    }
+
+    mIsType = isType;
+    rc = QCamera3Channel::addStream(mStreamType,
+        mStreamFormat, mDim, ROTATE_0, MIN_STREAMING_BUFFER_NUM,
+        mPostProcMask, mIsType);
+    if (rc < 0) {
+        ALOGE("%s: addStream failed", __func__);
+    }
+    return rc;
+}
+
+int32_t QCamera3SupportChannel::request(buffer_handle_t * /*buffer*/,
+                                                uint32_t /*frameNumber*/)
+{
+    return NO_ERROR;
+}
+
+void QCamera3SupportChannel::streamCbRoutine(
+                        mm_camera_super_buf_t *super_frame,
+                        QCamera3Stream * /*stream*/)
+{
+    if (super_frame == NULL || super_frame->num_bufs != 1) {
+        ALOGE("%s: super_frame is not valid", __func__);
+        return;
+    }
+    bufDone(super_frame);
+    free(super_frame);
+}
+
+QCamera3StreamMem* QCamera3SupportChannel::getStreamBufs(uint32_t len)
+{
+    int rc;
+    mMemory = new QCamera3StreamMem(mNumBuffers);
+    if (!mMemory) {
+        ALOGE("%s: unable to create heap memory", __func__);
+        return NULL;
+    }
+    rc = mMemory->allocateAll(len);
+    if (rc < 0) {
+        ALOGE("%s: unable to allocate heap memory", __func__);
+        delete mMemory;
+        mMemory = NULL;
+        return NULL;
+    }
+    return mMemory;
+}
+
+void QCamera3SupportChannel::putStreamBufs()
+{
+    mMemory->deallocate();
+    delete mMemory;
+    mMemory = NULL;
+}
+
+}; // namespace qcamera
diff --git a/camera/QCamera2/HAL3/QCamera3Channel.h b/camera/QCamera2/HAL3/QCamera3Channel.h
new file mode 100644
index 0000000..3bf4125
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3Channel.h
@@ -0,0 +1,589 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCAMERA3_CHANNEL_H__
+#define __QCAMERA3_CHANNEL_H__
+
+#include <hardware/camera3.h>
+#include "QCamera3Stream.h"
+#include "QCamera3Mem.h"
+#include "QCamera3StreamMem.h"
+#include "QCamera3PostProc.h"
+#include "QCamera3HALHeader.h"
+#include "utils/Vector.h"
+#include <utils/List.h>
+
+extern "C" {
+#include <mm_camera_interface.h>
+}
+
+using namespace android;
+
+#define MIN_STREAMING_BUFFER_NUM (7 + 11)
+
+namespace qcamera {
+
+typedef void (*channel_cb_routine)(mm_camera_super_buf_t *metadata,
+                                camera3_stream_buffer_t *buffer,
+                                uint32_t frame_number, bool isInputBuffer,
+                                void *userdata);
+class QCamera3Channel
+{
+public:
+    QCamera3Channel(uint32_t cam_handle,
+                   uint32_t channel_handle,
+                   mm_camera_ops_t *cam_ops,
+                   channel_cb_routine cb_routine,
+                   cam_padding_info_t *paddingInfo,
+                   uint32_t postprocess_mask,
+                   void *userData, uint32_t numBuffers);
+    virtual ~QCamera3Channel();
+
+    virtual int32_t start();
+    virtual int32_t stop();
+    virtual int32_t setBatchSize(uint32_t);
+    virtual int32_t queueBatchBuf();
+    virtual int32_t setPerFrameMapUnmap(bool enable);
+    int32_t bufDone(mm_camera_super_buf_t *recvd_frame);
+    int32_t setBundleInfo(const cam_bundle_config_t &bundleInfo);
+
+    virtual uint32_t getStreamTypeMask();
+    uint32_t getStreamID(uint32_t streamMask);
+    virtual int32_t initialize(cam_is_type_t isType) = 0;
+    virtual int32_t request(buffer_handle_t * /*buffer*/,
+                uint32_t /*frameNumber*/){ return 0;};
+    virtual int32_t request(buffer_handle_t * /*buffer*/,
+                uint32_t /*frameNumber*/,
+                camera3_stream_buffer_t* /*pInputBuffer*/,
+                metadata_buffer_t* /*metadata*/){ return 0;};
+    virtual void streamCbRoutine(mm_camera_super_buf_t *super_frame,
+                            QCamera3Stream *stream) = 0;
+
+    virtual int32_t registerBuffer(buffer_handle_t *buffer, cam_is_type_t isType) = 0;
+    virtual QCamera3StreamMem *getStreamBufs(uint32_t len) = 0;
+    virtual void putStreamBufs() = 0;
+
+    QCamera3Stream *getStreamByHandle(uint32_t streamHandle);
+    uint32_t getMyHandle() const {return m_handle;};
+    uint32_t getNumOfStreams() const {return m_numStreams;};
+    uint32_t getNumBuffers() const {return mNumBuffers;};
+    QCamera3Stream *getStreamByIndex(uint32_t index);
+
+    static void streamCbRoutine(mm_camera_super_buf_t *super_frame,
+                QCamera3Stream *stream, void *userdata);
+    void dumpYUV(mm_camera_buf_def_t *frame, cam_dimension_t dim,
+            cam_frame_len_offset_t offset, uint8_t name);
+
+    void *mUserData;
+    cam_padding_info_t *mPaddingInfo;
+    QCamera3Stream *mStreams[MAX_STREAM_NUM_IN_BUNDLE];
+    uint32_t m_numStreams;
+protected:
+
+    int32_t addStream(cam_stream_type_t streamType,
+                      cam_format_t streamFormat,
+                      cam_dimension_t streamDim,
+                      cam_rotation_t streamRotation,
+                      uint8_t minStreamBufnum,
+                      uint32_t postprocessMask,
+                      cam_is_type_t isType,
+                      uint32_t batchSize = 0);
+    int32_t allocateStreamInfoBuf(camera3_stream_t *stream);
+
+    uint32_t m_camHandle;
+    mm_camera_ops_t *m_camOps;
+    bool m_bIsActive;
+
+    uint32_t m_handle;
+
+
+    mm_camera_buf_notify_t mDataCB;
+
+
+    QCamera3HeapMemory *mStreamInfoBuf;
+    channel_cb_routine mChannelCB;
+    //cam_padding_info_t *mPaddingInfo;
+    uint32_t mPostProcMask;
+    uint8_t mYUVDump;
+    cam_is_type_t mIsType;
+    uint32_t mNumBuffers;
+    /* Enable unmapping of the buffer before issuing the buffer callback. This
+     * flag defaults to true and is selectively set to false for use cases
+     * such as HFR to avoid any performance hit due to mapping/unmapping */
+    bool    mPerFrameMapUnmapEnable;
+};
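+
+/* Minimal usage sketch, assuming a concrete subclass such as
+ * QCamera3RegularChannel, the usual IS_TYPE_NONE stabilization value and
+ * illustrative variable names:
+ *
+ *   QCamera3Channel *channel = ...;       // e.g. a QCamera3RegularChannel
+ *   channel->initialize(IS_TYPE_NONE);    // sets up the mm-camera stream(s)
+ *   channel->start();                     // starts streaming
+ *   channel->request(buffer, frameNumber);
+ *   // completed frames return through streamCbRoutine(), which invokes the
+ *   // registered channel_cb_routine with the framework buffer
+ *   channel->stop();
+ */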
+
+/* QCamera3ProcessingChannel is used to handle all streams that are directly
+ * generated by hardware and given to the framework without any postprocessing
+ * at HAL. It also handles input streams that require reprocessing by hardware
+ * before being returned to the framework. */
+class QCamera3ProcessingChannel : public QCamera3Channel
+{
+public:
+   QCamera3ProcessingChannel(uint32_t cam_handle,
+           uint32_t channel_handle,
+           mm_camera_ops_t *cam_ops,
+           channel_cb_routine cb_routine,
+           cam_padding_info_t *paddingInfo,
+           void *userData,
+           camera3_stream_t *stream,
+           cam_stream_type_t stream_type,
+           uint32_t postprocess_mask,
+           QCamera3Channel *metadataChannel,
+           uint32_t numBuffers = MAX_INFLIGHT_REQUESTS);
+
+    ~QCamera3ProcessingChannel();
+
+    virtual int32_t initialize(cam_is_type_t isType);
+    virtual int32_t request(buffer_handle_t *buffer,
+            uint32_t frameNumber,
+            camera3_stream_buffer_t* pInputBuffer,
+            metadata_buffer_t* metadata);
+    virtual void streamCbRoutine(mm_camera_super_buf_t *super_frame,
+            QCamera3Stream *stream);
+    virtual QCamera3StreamMem *getStreamBufs(uint32_t len);
+    virtual void putStreamBufs();
+    virtual int32_t registerBuffer(buffer_handle_t *buffer, cam_is_type_t isType);
+
+    virtual int32_t stop();
+
+    virtual reprocess_type_t getReprocessType() = 0;
+
+    virtual void reprocessCbRoutine(buffer_handle_t *resultBuffer,
+            uint32_t resultFrameNumber);
+
+    int32_t queueReprocMetadata(mm_camera_super_buf_t *metadata);
+    int32_t metadataBufDone(mm_camera_super_buf_t *recvd_frame);
+    int32_t translateStreamTypeAndFormat(camera3_stream_t *stream,
+            cam_stream_type_t &streamType,
+            cam_format_t &streamFormat);
+    int32_t setReprocConfig(reprocess_config_t &reproc_cfg,
+            camera3_stream_buffer_t *pInputBuffer,
+            metadata_buffer_t *metadata,
+            cam_format_t streamFormat, cam_dimension_t dim);
+    int32_t setFwkInputPPData(qcamera_fwk_input_pp_data_t *src_frame,
+            camera3_stream_buffer_t *pInputBuffer,
+            reprocess_config_t *reproc_cfg,
+            metadata_buffer_t *metadata,
+            buffer_handle_t *output_buffer,
+            uint32_t frameNumber);
+    int32_t checkStreamCbErrors(mm_camera_super_buf_t *super_frame,
+            QCamera3Stream *stream);
+    int32_t getStreamSize(cam_dimension_t &dim);
+
+    QCamera3PostProcessor m_postprocessor; // post processor
+
+protected:
+    bool isWNREnabled() {return m_bWNROn;};
+    void startPostProc(const reprocess_config_t &reproc_cfg);
+    void issueChannelCb(buffer_handle_t *resultBuffer,
+            uint32_t resultFrameNumber);
+    int32_t releaseOfflineMemory(uint32_t resultFrameNumber);
+
+    QCamera3StreamMem mMemory; //output buffer allocated by fwk
+    camera3_stream_t *mCamera3Stream;
+    uint32_t mNumBufs;
+    cam_stream_type_t mStreamType;
+    cam_format_t mStreamFormat;
+    uint8_t mIntent;
+
+    bool mPostProcStarted;
+    bool mInputBufferConfig;   // Set when the processing channel is configured
+                               // for processing input(framework) buffers
+
+    QCamera3Channel *m_pMetaChannel;
+    mm_camera_super_buf_t *mMetaFrame;
+    QCamera3StreamMem mOfflineMemory;      //reprocessing input buffer
+    QCamera3StreamMem mOfflineMetaMemory; //reprocessing metadata buffer
+    List<uint32_t> mFreeOfflineMetaBuffersList;
+    Mutex mFreeOfflineMetaBuffersLock;
+
+private:
+
+    bool m_bWNROn;
+};
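+
+/* Minimal sketch of how a processing channel is exercised, with hypothetical
+ * variable names (outBuf, inBuf, meta, frameNo):
+ *
+ *   procChannel->request(outBuf, frameNo, NULL, NULL);    // plain HW output
+ *   procChannel->request(outBuf, frameNo, &inBuf, meta);  // offline reprocess
+ *   // reprocessed results come back via reprocessCbRoutine(), which hands
+ *   // the buffer to the framework through issueChannelCb()
+ */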
+
+/* QCamera3RegularChannel is used to handle all streams that are directly
+ * generated by hardware and given to frameworks without any postprocessing at HAL.
+ * Examples are: all IMPLEMENTATION_DEFINED streams, CPU_READ streams. */
+class QCamera3RegularChannel : public QCamera3ProcessingChannel
+{
+public:
+    QCamera3RegularChannel(uint32_t cam_handle,
+                    uint32_t channel_handle,
+                    mm_camera_ops_t *cam_ops,
+                    channel_cb_routine cb_routine,
+                    cam_padding_info_t *paddingInfo,
+                    void *userData,
+                    camera3_stream_t *stream,
+                    cam_stream_type_t stream_type,
+                    uint32_t postprocess_mask,
+                    QCamera3Channel *metadataChannel,
+                    uint32_t numBuffers = MAX_INFLIGHT_REQUESTS);
+
+    virtual ~QCamera3RegularChannel();
+
+    virtual int32_t setBatchSize(uint32_t batchSize);
+    virtual uint32_t getStreamTypeMask();
+    virtual int32_t queueBatchBuf();
+    virtual int32_t initialize(cam_is_type_t isType);
+    virtual int32_t request(buffer_handle_t *buffer, uint32_t frameNumber);
+    virtual reprocess_type_t getReprocessType();
+
+private:
+    int32_t initialize(struct private_handle_t *priv_handle);
+
+    cam_rotation_t mRotation;
+    uint32_t mBatchSize;
+
+};
+
+/* QCamera3MetadataChannel is for metadata stream generated by camera daemon. */
+class QCamera3MetadataChannel : public QCamera3Channel
+{
+public:
+    QCamera3MetadataChannel(uint32_t cam_handle,
+                    uint32_t channel_handle,
+                    mm_camera_ops_t *cam_ops,
+                    channel_cb_routine cb_routine,
+                    cam_padding_info_t *paddingInfo,
+                    uint32_t postprocess_mask,
+                    void *userData,
+                    uint32_t numBuffers = MIN_STREAMING_BUFFER_NUM);
+    virtual ~QCamera3MetadataChannel();
+
+    virtual int32_t initialize(cam_is_type_t isType);
+
+    virtual int32_t request(buffer_handle_t *buffer, uint32_t frameNumber);
+    virtual void streamCbRoutine(mm_camera_super_buf_t *super_frame,
+                            QCamera3Stream *stream);
+
+    virtual QCamera3StreamMem *getStreamBufs(uint32_t le);
+    virtual void putStreamBufs();
+    virtual int32_t registerBuffer(buffer_handle_t * /*buffer*/, cam_is_type_t /*isType*/)
+            { return NO_ERROR; };
+
+private:
+    QCamera3StreamMem *mMemory;
+};
+
+/* QCamera3RawChannel is for opaque/cross-platform raw stream containing
+ * vendor specific bayer data or 16-bit unpacked bayer data */
+class QCamera3RawChannel : public QCamera3RegularChannel
+{
+public:
+    QCamera3RawChannel(uint32_t cam_handle,
+                    uint32_t channel_handle,
+                    mm_camera_ops_t *cam_ops,
+                    channel_cb_routine cb_routine,
+                    cam_padding_info_t *paddingInfo,
+                    void *userData,
+                    camera3_stream_t *stream,
+                    uint32_t postprocess_mask,
+                    QCamera3Channel *metadataChannel,
+                    bool raw_16 = false,
+                    uint32_t numBuffers = MAX_INFLIGHT_REQUESTS);
+
+    virtual ~QCamera3RawChannel();
+
+    virtual int32_t initialize(cam_is_type_t isType);
+
+    virtual void streamCbRoutine(mm_camera_super_buf_t *super_frame,
+                            QCamera3Stream *stream);
+
+    virtual reprocess_type_t getReprocessType();
+
+private:
+    bool mRawDump;
+    bool mIsRaw16;
+
+    void dumpRawSnapshot(mm_camera_buf_def_t *frame);
+    void convertLegacyToRaw16(mm_camera_buf_def_t *frame);
+    void convertMipiToRaw16(mm_camera_buf_def_t *frame);
+};
+
+/*
+ * QCamera3RawDumpChannel is for HAL-internal raw dump use only
+ */
+
+class QCamera3RawDumpChannel : public QCamera3Channel
+{
+public:
+    QCamera3RawDumpChannel(uint32_t cam_handle,
+                    uint32_t channel_handle,
+                    mm_camera_ops_t *cam_ops,
+                    cam_dimension_t rawDumpSize,
+                    cam_padding_info_t *paddingInfo,
+                    void *userData,
+                    uint32_t postprocess_mask, uint32_t numBuffers = 3U);
+    virtual ~QCamera3RawDumpChannel();
+    virtual int32_t initialize(cam_is_type_t isType);
+    virtual void streamCbRoutine(mm_camera_super_buf_t *super_frame,
+                            QCamera3Stream *stream);
+    virtual QCamera3StreamMem *getStreamBufs(uint32_t le);
+    virtual void putStreamBufs();
+    virtual int32_t registerBuffer(buffer_handle_t * /*buffer*/, cam_is_type_t /*isType*/)
+            { return NO_ERROR; };
+    virtual int32_t request(buffer_handle_t *buffer, uint32_t frameNumber);
+    void dumpRawSnapshot(mm_camera_buf_def_t *frame);
+
+public:
+    cam_dimension_t mDim;
+
+private:
+    bool mRawDump;
+    QCamera3StreamMem *mMemory;
+};
+
+/* QCamera3YUVChannel is used to handle flexible YUV streams that are directly
+ * generated by hardware and given to frameworks without any postprocessing at HAL.
+ * It is also used to handle input buffers that generate YUV outputs */
+class QCamera3YUVChannel : public QCamera3ProcessingChannel
+{
+public:
+    QCamera3YUVChannel(uint32_t cam_handle,
+            uint32_t channel_handle,
+            mm_camera_ops_t *cam_ops,
+            channel_cb_routine cb_routine,
+            cam_padding_info_t *paddingInfo,
+            void *userData,
+            camera3_stream_t *stream,
+            cam_stream_type_t stream_type,
+            uint32_t postprocess_mask,
+            QCamera3Channel *metadataChannel);
+    ~QCamera3YUVChannel();
+    virtual int32_t initialize(cam_is_type_t isType);
+    virtual int32_t request(buffer_handle_t *buffer,
+            uint32_t frameNumber,
+            camera3_stream_buffer_t* pInputBuffer,
+            metadata_buffer_t* metadata, bool &needMetadata);
+    virtual reprocess_type_t getReprocessType();
+    virtual void streamCbRoutine(mm_camera_super_buf_t *super_frame,
+            QCamera3Stream *stream);
+    virtual void putStreamBufs();
+    virtual void reprocessCbRoutine(buffer_handle_t *resultBuffer,
+        uint32_t resultFrameNumber);
+
+private:
+    typedef struct {
+        uint32_t frameNumber;
+        bool offlinePpFlag;
+        buffer_handle_t *output;
+        mm_camera_super_buf_t *callback_buffer;
+    } PpInfo;
+
+    // Whether offline postprocessing is required for this channel
+    bool mBypass;
+    uint32_t mFrameLen;
+
+    // Current edge, noise, and crop region setting
+    cam_edge_application_t mEdgeMode;
+    uint32_t mNoiseRedMode;
+    cam_crop_region_t mCropRegion;
+
+    // Mutex to protect mOfflinePpInfoList and mFreeHeapBufferList
+    Mutex mOfflinePpLock;
+    // Map between frame number and whether the request needs to be
+    // postprocessed.
+    List<PpInfo> mOfflinePpInfoList;
+    // Heap buffer index list
+    List<uint32_t> mFreeHeapBufferList;
+
+private:
+    bool needsFramePostprocessing(metadata_buffer_t* meta);
+    int32_t handleOfflinePpCallback(uint32_t resultFrameNumber,
+            Vector<mm_camera_super_buf_t *>& pendingCbs);
+};
+
+/* QCamera3PicChannel is for the JPEG stream: a YUV stream generated by the
+ * hardware and then encoded into a JPEG stream */
+class QCamera3PicChannel : public QCamera3ProcessingChannel
+{
+public:
+    QCamera3PicChannel(uint32_t cam_handle,
+            uint32_t channel_handle,
+            mm_camera_ops_t *cam_ops,
+            channel_cb_routine cb_routine,
+            cam_padding_info_t *paddingInfo,
+            void *userData,
+            camera3_stream_t *stream,
+            uint32_t postprocess_mask,
+            bool is4KVideo,
+            bool isInputStreamConfigured,
+            QCamera3Channel *metadataChannel,
+            uint32_t numBuffers = MAX_INFLIGHT_REQUESTS);
+    ~QCamera3PicChannel();
+
+    virtual int32_t initialize(cam_is_type_t isType);
+    virtual int32_t request(buffer_handle_t *buffer,
+            uint32_t frameNumber,
+            camera3_stream_buffer_t* pInputBuffer,
+            metadata_buffer_t* metadata);
+    virtual void streamCbRoutine(mm_camera_super_buf_t *super_frame,
+            QCamera3Stream *stream);
+
+    virtual QCamera3StreamMem *getStreamBufs(uint32_t le);
+    virtual void putStreamBufs();
+    virtual reprocess_type_t getReprocessType();
+
+    QCamera3Exif *getExifData(metadata_buffer_t *metadata,
+            jpeg_settings_t *jpeg_settings);
+    void overrideYuvSize(uint32_t width, uint32_t height);
+    static void jpegEvtHandle(jpeg_job_status_t status,
+            uint32_t /*client_hdl*/,
+            uint32_t jobId,
+            mm_jpeg_output_t *p_output,
+            void *userdata);
+    static void dataNotifyCB(mm_camera_super_buf_t *recvd_frame,
+            void *userdata);
+
+private:
+    int32_t queueJpegSetting(uint32_t out_buf_index, metadata_buffer_t *metadata);
+
+public:
+    cam_dimension_t m_max_pic_dim;
+
+private:
+    uint32_t mNumSnapshotBufs;
+    uint32_t mYuvWidth, mYuvHeight;
+    int32_t mCurrentBufIndex;
+    bool mInputBufferHint;
+    QCamera3StreamMem *mYuvMemory;
+    // Keep a list of free buffers
+    Mutex mFreeBuffersLock;
+    List<uint32_t> mFreeBufferList;
+    uint32_t mFrameLen;
+};
+
+// reprocess channel class
+class QCamera3ReprocessChannel : public QCamera3Channel
+{
+public:
+    QCamera3ReprocessChannel(uint32_t cam_handle,
+                            uint32_t channel_handle,
+                            mm_camera_ops_t *cam_ops,
+                            channel_cb_routine cb_routine,
+                            cam_padding_info_t *paddingInfo,
+                            uint32_t postprocess_mask,
+                            void *userData, void *ch_hdl);
+    QCamera3ReprocessChannel();
+    virtual ~QCamera3ReprocessChannel();
+    // offline reprocess
+    virtual int32_t start();
+    virtual int32_t stop();
+    int32_t doReprocessOffline(qcamera_fwk_input_pp_data_t *frame);
+    int32_t doReprocess(int buf_fd, size_t buf_length, int32_t &ret_val,
+                        mm_camera_super_buf_t *meta_buf);
+    int32_t overrideMetadata(qcamera_hal3_pp_buffer_t *pp_buffer,
+            mm_camera_buf_def_t *meta_buffer,
+            jpeg_settings_t *jpeg_settings,
+            qcamera_fwk_input_pp_data_t &fwk_frame);
+    int32_t overrideFwkMetadata(qcamera_fwk_input_pp_data_t *frame);
+    virtual QCamera3StreamMem *getStreamBufs(uint32_t len);
+    virtual void putStreamBufs();
+    virtual int32_t initialize(cam_is_type_t isType);
+    int32_t unmapOfflineBuffers(bool all);
+    int32_t bufDone(mm_camera_super_buf_t *recvd_frame);
+    virtual void streamCbRoutine(mm_camera_super_buf_t *super_frame,
+                            QCamera3Stream *stream);
+    static void dataNotifyCB(mm_camera_super_buf_t *recvd_frame,
+                                       void* userdata);
+    int32_t addReprocStreamsFromSource(cam_pp_feature_config_t &pp_config,
+           const reprocess_config_t &src_config,
+           cam_is_type_t is_type,
+           QCamera3Channel *pMetaChannel);
+    QCamera3Stream *getStreamBySrcHandle(uint32_t srcHandle);
+    QCamera3Stream *getSrcStreamBySrcHandle(uint32_t srcHandle);
+    virtual int32_t registerBuffer(buffer_handle_t * buffer, cam_is_type_t isType);
+
+public:
+    void *inputChHandle;
+
+private:
+    typedef struct {
+        QCamera3Stream *stream;
+        cam_mapping_buf_type type;
+        uint32_t index;
+    } OfflineBuffer;
+
+    android::List<OfflineBuffer> mOfflineBuffers;
+    android::List<OfflineBuffer> mOfflineMetaBuffers;
+    int32_t mOfflineBuffersIndex;
+    int32_t mOfflineMetaIndex;
+    uint32_t mFrameLen;
+    Mutex mFreeBuffersLock; // Lock for free heap buffers
+    List<int32_t> mFreeBufferList; // Free heap buffers list
+    reprocess_type_t mReprocessType;
+    uint32_t mSrcStreamHandles[MAX_STREAM_NUM_IN_BUNDLE];
+    QCamera3ProcessingChannel *m_pSrcChannel; // ptr to source channel for reprocess
+    QCamera3Channel *m_pMetaChannel;
+    QCamera3StreamMem *mMemory;
+    QCamera3StreamMem mGrallocMemory;
+};
+
+
+/* QCamera3SupportChannel is for HAL internal consumption only */
+class QCamera3SupportChannel : public QCamera3Channel
+{
+public:
+    QCamera3SupportChannel(uint32_t cam_handle,
+                    uint32_t channel_handle,
+                    mm_camera_ops_t *cam_ops,
+                    cam_padding_info_t *paddingInfo,
+                    uint32_t postprocess_mask,
+                    cam_stream_type_t streamType,
+                    cam_dimension_t *dim,
+                    cam_format_t streamFormat,
+                    void *userData,
+                    uint32_t numBuffers = MIN_STREAMING_BUFFER_NUM
+                    );
+    virtual ~QCamera3SupportChannel();
+
+    virtual int32_t initialize(cam_is_type_t isType);
+
+    virtual int32_t request(buffer_handle_t *buffer, uint32_t frameNumber);
+    virtual void streamCbRoutine(mm_camera_super_buf_t *super_frame,
+                            QCamera3Stream *stream);
+
+    virtual QCamera3StreamMem *getStreamBufs(uint32_t le);
+    virtual void putStreamBufs();
+    virtual int32_t registerBuffer(buffer_handle_t * /*buffer*/, cam_is_type_t /*isType*/)
+            { return NO_ERROR; };
+
+    static cam_dimension_t kDim;
+private:
+    QCamera3StreamMem *mMemory;
+    cam_dimension_t mDim;
+    cam_stream_type_t mStreamType;
+    cam_format_t mStreamFormat;
+};
+
+}; // namespace qcamera
+
+#endif /* __QCAMERA3_CHANNEL_H__ */
diff --git a/camera/QCamera2/HAL3/QCamera3CropRegionMapper.cpp b/camera/QCamera2/HAL3/QCamera3CropRegionMapper.cpp
new file mode 100644
index 0000000..de4f561
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3CropRegionMapper.cpp
@@ -0,0 +1,268 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+#define LOG_TAG "QCamera3CropRegionMapper"
+
+#include "QCamera3CropRegionMapper.h"
+#include "QCamera3HWI.h"
+
+using namespace android;
+
+namespace qcamera {
+
+/*===========================================================================
+ * FUNCTION   : QCamera3CropRegionMapper
+ *
+ * DESCRIPTION: Constructor
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCamera3CropRegionMapper::QCamera3CropRegionMapper()
+        : mSensorW(0),
+          mSensorH(0),
+          mActiveArrayW(0),
+          mActiveArrayH(0)
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCamera3CropRegionMapper
+ *
+ * DESCRIPTION: destructor
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+
+QCamera3CropRegionMapper::~QCamera3CropRegionMapper()
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : update
+ *
+ * DESCRIPTION: update sensor active array size and sensor output size
+ *
+ * PARAMETERS :
+ *   @active_array_w : active array width
+ *   @active_array_h : active array height
+ *   @sensor_w       : sensor output width
+ *   @sensor_h       : sensor output height
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera3CropRegionMapper::update(uint32_t active_array_w,
+        uint32_t active_array_h, uint32_t sensor_w,
+        uint32_t sensor_h)
+{
+    // Sanity check
+    if (active_array_w == 0 || active_array_h == 0 ||
+            sensor_w == 0 || sensor_h == 0) {
+        ALOGE("%s: active_array size and sensor output size must be non zero",
+                __func__);
+        return;
+    }
+    if (active_array_w < sensor_w || active_array_h < sensor_h) {
+        ALOGE("%s: invalid input: active_array [%d, %d], sensor size [%d, %d]",
+                __func__, active_array_w, active_array_h, sensor_w, sensor_h);
+        return;
+    }
+    mSensorW = sensor_w;
+    mSensorH = sensor_h;
+    mActiveArrayW = active_array_w;
+    mActiveArrayH = active_array_h;
+
+    ALOGI("%s: active_array: %d x %d, sensor size %d x %d", __func__,
+            mActiveArrayW, mActiveArrayH, mSensorW, mSensorH);
+}
+
+/*===========================================================================
+ * FUNCTION   : toActiveArray
+ *
+ * DESCRIPTION: Map crop rectangle from sensor output space to active array space
+ *
+ * PARAMETERS :
+ *   @crop_left   : x coordinate of top left corner of rectangle
+ *   @crop_top    : y coordinate of top left corner of rectangle
+ *   @crop_width  : width of rectangle
+ *   @crop_height : height of rectangle
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera3CropRegionMapper::toActiveArray(int32_t& crop_left, int32_t& crop_top,
+        int32_t& crop_width, int32_t& crop_height)
+{
+    if (mSensorW == 0 || mSensorH == 0 ||
+            mActiveArrayW == 0 || mActiveArrayH == 0) {
+        ALOGE("%s: sensor/active array sizes are not initialized!", __func__);
+        return;
+    }
+
+    crop_left = crop_left * mActiveArrayW / mSensorW;
+    crop_top = crop_top * mActiveArrayH / mSensorH;
+    crop_width = crop_width * mActiveArrayW / mSensorW;
+    crop_height = crop_height * mActiveArrayH / mSensorH;
+
+    boundToSize(crop_left, crop_top, crop_width, crop_height,
+            mActiveArrayW, mActiveArrayH);
+}
+
+/*===========================================================================
+ * FUNCTION   : toSensor
+ *
+ * DESCRIPTION: Map crop rectangle from active array space to sensor output space
+ *
+ * PARAMETERS :
+ *   @crop_left   : x coordinate of top left corner of rectangle
+ *   @crop_top    : y coordinate of top left corner of rectangle
+ *   @crop_width  : width of rectangle
+ *   @crop_height : height of rectangle
+ *
+ * RETURN     : none
+ *==========================================================================*/
+
+void QCamera3CropRegionMapper::toSensor(int32_t& crop_left, int32_t& crop_top,
+        int32_t& crop_width, int32_t& crop_height)
+{
+    if (mSensorW == 0 || mSensorH == 0 ||
+            mActiveArrayW == 0 || mActiveArrayH == 0) {
+        ALOGE("%s: sensor/active array sizes are not initialized!", __func__);
+        return;
+    }
+
+    crop_left = crop_left * mSensorW / mActiveArrayW;
+    crop_top = crop_top * mSensorH / mActiveArrayH;
+    crop_width = crop_width * mSensorW / mActiveArrayW;
+    crop_height = crop_height * mSensorH / mActiveArrayH;
+
+    CDBG("%s: before bounding left %d, top %d, width %d, height %d",
+        __func__, crop_left, crop_top, crop_width, crop_height);
+    boundToSize(crop_left, crop_top, crop_width, crop_height,
+            mSensorW, mSensorH);
+    CDBG("%s: after bounding left %d, top %d, width %d, height %d",
+        __func__, crop_left, crop_top, crop_width, crop_height);
+}
+
+/*===========================================================================
+ * FUNCTION   : boundToSize
+ *
+ * DESCRIPTION: Bound a particular rectangle inside a bounding box
+ *
+ * PARAMETERS :
+ *   @left    : x coordinate of top left corner of rectangle
+ *   @top     : y coordinate of top left corner of rectangle
+ *   @width   : width of rectangle
+ *   @height  : height of rectangle
+ *   @bound_w : width of bounding box
+ *   @bound_h : height of bounding box
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera3CropRegionMapper::boundToSize(int32_t& left, int32_t& top,
+            int32_t& width, int32_t& height, int32_t bound_w, int32_t bound_h)
+{
+    if (left < 0) {
+        left = 0;
+    }
+    if (top < 0) {
+        top = 0;
+    }
+
+    if ((left + width) > bound_w) {
+        width = bound_w - left;
+    }
+    if ((top + height) > bound_h) {
+        height = bound_h - top;
+    }
+}
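+
+/* Worked example for boundToSize(), with assumed values: a rectangle
+ * (left=-8, top=20, width=660, height=500) bounded to a 640x480 box becomes
+ * (left=0, top=20, width=640, height=460): the origin is clamped to the box
+ * first, then the extents are clipped so that left + width <= bound_w and
+ * top + height <= bound_h. */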
+
+/*===========================================================================
+ * FUNCTION   : toActiveArray
+ *
+ * DESCRIPTION: Map co-ordinate from sensor output space to active array space
+ *
+ * PARAMETERS :
+ *   @x   : x coordinate
+ *   @y   : y coordinate
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera3CropRegionMapper::toActiveArray(uint32_t& x, uint32_t& y)
+{
+    if (mSensorW == 0 || mSensorH == 0 ||
+            mActiveArrayW == 0 || mActiveArrayH == 0) {
+        ALOGE("%s: sensor/active array sizes are not initialized!", __func__);
+        return;
+    }
+    if ((x > static_cast<uint32_t>(mSensorW)) ||
+            (y > static_cast<uint32_t>(mSensorH))) {
+        ALOGE("%s: invalid co-ordinate (%d, %d) in (0, 0, %d, %d) space",
+                __func__, x, y, mSensorW, mSensorH);
+        return;
+    }
+    x = x * mActiveArrayW / mSensorW;
+    y = y * mActiveArrayH / mSensorH;
+}
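+
+/* Worked example for the point mapping, with assumed sizes: after
+ * update(4208, 3120, 2104, 1560) on a mapper instance, toActiveArray(x, y)
+ * scales a sensor-space point up by active_array/sensor, e.g.
+ *
+ *   uint32_t x = 100, y = 50;
+ *   mapper.toActiveArray(x, y);   // x == 200, y == 100
+ *
+ * and toSensor() applies the inverse scaling. */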
+
+/*===========================================================================
+ * FUNCTION   : toSensor
+ *
+ * DESCRIPTION: Map co-ordinate from active array space to sensor output space
+ *
+ * PARAMETERS :
+ *   @x   : x coordinate
+ *   @y   : y coordinate
+ *
+ * RETURN     : none
+ *==========================================================================*/
+
+void QCamera3CropRegionMapper::toSensor(uint32_t& x, uint32_t& y)
+{
+    if (mSensorW == 0 || mSensorH == 0 ||
+            mActiveArrayW == 0 || mActiveArrayH == 0) {
+        ALOGE("%s: sensor/active array sizes are not initialized!", __func__);
+        return;
+    }
+
+    if ((x > static_cast<uint32_t>(mActiveArrayW)) ||
+            (y > static_cast<uint32_t>(mActiveArrayH))) {
+        ALOGE("%s: invalid co-ordinate (%d, %d) in (0, 0, %d, %d) space",
+                __func__, x, y, mSensorW, mSensorH);
+        return;
+    }
+    x = x * mSensorW / mActiveArrayW;
+    y = y * mSensorH / mActiveArrayH;
+}
+
+}; // namespace qcamera
diff --git a/camera/QCamera2/HAL3/QCamera3CropRegionMapper.h b/camera/QCamera2/HAL3/QCamera3CropRegionMapper.h
new file mode 100644
index 0000000..f87faae
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3CropRegionMapper.h
@@ -0,0 +1,66 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#ifndef __QCAMERA3CROPREGIONMAPPER_H__
+#define __QCAMERA3CROPREGIONMAPPER_H__
+
+#include <utils/Log.h>
+#include <utils/Errors.h>
+#include "QCamera3HALHeader.h"
+
+using namespace android;
+
+namespace qcamera {
+
+class QCamera3CropRegionMapper {
+public:
+    QCamera3CropRegionMapper();
+    virtual ~QCamera3CropRegionMapper();
+
+    void update(uint32_t active_array_w, uint32_t active_array_h,
+            uint32_t sensor_w, uint32_t sensor_h);
+    void toActiveArray(int32_t& crop_left, int32_t& crop_top,
+            int32_t& crop_width, int32_t& crop_height);
+    void toSensor(int32_t& crop_left, int32_t& crop_top,
+            int32_t& crop_width, int32_t& crop_height);
+    void toActiveArray(uint32_t& x, uint32_t& y);
+    void toSensor(uint32_t& x, uint32_t& y);
+
+private:
+    /* sensor output size */
+    int32_t mSensorW, mSensorH;
+    int32_t mActiveArrayW, mActiveArrayH;
+
+    void boundToSize(int32_t& left, int32_t& top, int32_t& width,
+            int32_t& height, int32_t bound_w, int32_t bound_h);
+};
+
+}; // namespace qcamera
+
+#endif /* __QCAMERA3CROPREGIONMAPPER_H__ */
diff --git a/camera/QCamera2/HAL3/QCamera3HALHeader.h b/camera/QCamera2/HAL3/QCamera3HALHeader.h
new file mode 100644
index 0000000..4502b8a
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3HALHeader.h
@@ -0,0 +1,96 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*	notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*	copyright notice, this list of conditions and the following
+*	disclaimer in the documentation and/or other materials provided
+*	with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*	contributors may be used to endorse or promote products derived
+*	from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+#ifndef __QCAMERA_HALHEADER_H__
+#define __QCAMERA_HALHEADER_H__
+
+extern "C" {
+#include <mm_camera_interface.h>
+#include <mm_jpeg_interface.h>
+}
+
+using namespace android;
+
+namespace qcamera {
+
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+#define IS_USAGE_ZSL(usage)  (((usage) & (GRALLOC_USAGE_HW_CAMERA_ZSL)) \
+        == (GRALLOC_USAGE_HW_CAMERA_ZSL))
+
+class QCamera3Channel;
+class QCamera3ProcessingChannel;
+
+    typedef enum {
+        INVALID,
+        VALID,
+    } stream_status_t;
+
+    typedef enum {
+       REPROCESS_TYPE_NONE,
+       REPROCESS_TYPE_JPEG,
+       REPROCESS_TYPE_YUV,
+       REPROCESS_TYPE_PRIVATE,
+       REPROCESS_TYPE_RAW
+    } reprocess_type_t;
+
+    typedef struct {
+        uint32_t out_buf_index;
+        int32_t jpeg_orientation;
+        uint8_t jpeg_quality;
+        uint8_t jpeg_thumb_quality;
+        cam_dimension_t thumbnail_size;
+        uint8_t gps_timestamp_valid;
+        int64_t gps_timestamp;
+        uint8_t gps_coordinates_valid;
+        double gps_coordinates[3];
+        char gps_processing_method[GPS_PROCESSING_METHOD_SIZE];
+        uint8_t image_desc_valid;
+        char image_desc[EXIF_IMAGE_DESCRIPTION_SIZE];
+    } jpeg_settings_t;
+
+    typedef struct {
+        int32_t iso_speed;
+        int64_t exposure_time;
+    } metadata_response_t;
+
+    typedef struct {
+        cam_stream_type_t stream_type;
+        cam_format_t stream_format;
+        cam_dimension_t input_stream_dim;
+        cam_stream_buf_plane_info_t input_stream_plane_info;
+        cam_dimension_t output_stream_dim;
+        cam_padding_info_t *padding;
+        reprocess_type_t reprocess_type;
+        QCamera3ProcessingChannel *src_channel;
+    } reprocess_config_t;
+
+};//namespace qcamera
+
+#endif
diff --git a/camera/QCamera2/HAL3/QCamera3HWI.cpp b/camera/QCamera2/HAL3/QCamera3HWI.cpp
new file mode 100644
index 0000000..d1b9913
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3HWI.cpp
@@ -0,0 +1,9551 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+#define LOG_TAG "QCamera3HWI"
+//#define LOG_NDEBUG 0
+
+#define __STDC_LIMIT_MACROS
+#include <cutils/properties.h>
+#include <hardware/camera3.h>
+#include <camera/CameraMetadata.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <stdint.h>
+#include <utils/Log.h>
+#include <utils/Errors.h>
+#include <utils/Trace.h>
+#include <sync/sync.h>
+#include <gralloc_priv.h>
+#include "util/QCameraFlash.h"
+#include "QCamera3HWI.h"
+#include "QCamera3Mem.h"
+#include "QCamera3Channel.h"
+#include "QCamera3PostProc.h"
+#include "QCamera3VendorTags.h"
+
+using namespace android;
+
+namespace qcamera {
+
+#define DATA_PTR(MEM_OBJ,INDEX) MEM_OBJ->getPtr( INDEX )
+
+#define EMPTY_PIPELINE_DELAY 2
+#define PARTIAL_RESULT_COUNT 2
+#define FRAME_SKIP_DELAY     0
+#define CAM_MAX_SYNC_LATENCY 4
+
+#define MAX_VALUE_8BIT ((1<<8)-1)
+#define MAX_VALUE_10BIT ((1<<10)-1)
+#define MAX_VALUE_12BIT ((1<<12)-1)
+
+#define VIDEO_4K_WIDTH  3840
+#define VIDEO_4K_HEIGHT 2160
+
+#define MAX_EIS_WIDTH 1920
+#define MAX_EIS_HEIGHT 1080
+
+#define MAX_RAW_STREAMS        1
+#define MAX_STALLING_STREAMS   1
+#define MAX_PROCESSED_STREAMS  3
+/* Batch mode is enabled only if FPS set is equal to or greater than this */
+#define MIN_FPS_FOR_BATCH_MODE (120)
+#define PREVIEW_FPS_FOR_HFR    (30)
+#define DEFAULT_VIDEO_FPS      (30.0)
+#define MAX_HFR_BATCH_SIZE     (8)
+#define REGIONS_TUPLE_COUNT    5
+#define HDR_PLUS_PERF_TIME_OUT  (7000) // milliseconds
+
+#define METADATA_MAP_SIZE(MAP) (sizeof(MAP)/sizeof(MAP[0]))
+
+#define CAM_QCOM_FEATURE_PP_SUPERSET_HAL3   ( CAM_QCOM_FEATURE_DENOISE2D |\
+                                              CAM_QCOM_FEATURE_CROP |\
+                                              CAM_QCOM_FEATURE_ROTATION |\
+                                              CAM_QCOM_FEATURE_SHARPNESS |\
+                                              CAM_QCOM_FEATURE_SCALE |\
+                                              CAM_QCOM_FEATURE_CAC |\
+                                              CAM_QCOM_FEATURE_CDS )
+
+#define TIMEOUT_NEVER -1
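+
+/* Rough arithmetic sketch, based only on the constants above and not on the
+ * batching logic elsewhere in this file: batch mode is considered from
+ * MIN_FPS_FOR_BATCH_MODE onwards, and a batch would presumably group about
+ * fps / PREVIEW_FPS_FOR_HFR frames, e.g. 120 / 30 = 4 or 240 / 30 = 8,
+ * capped at MAX_HFR_BATCH_SIZE. */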
+
+cam_capability_t *gCamCapability[MM_CAMERA_MAX_NUM_SENSORS];
+const camera_metadata_t *gStaticMetadata[MM_CAMERA_MAX_NUM_SENSORS];
+static pthread_mutex_t gCamLock = PTHREAD_MUTEX_INITIALIZER;
+volatile uint32_t gCamHal3LogLevel = 1;
+
+const QCamera3HardwareInterface::QCameraPropMap QCamera3HardwareInterface::CDS_MAP [] = {
+    {"On",  CAM_CDS_MODE_ON},
+    {"Off", CAM_CDS_MODE_OFF},
+    {"Auto",CAM_CDS_MODE_AUTO}
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_control_effect_mode_t,
+        cam_effect_mode_type> QCamera3HardwareInterface::EFFECT_MODES_MAP[] = {
+    { ANDROID_CONTROL_EFFECT_MODE_OFF,       CAM_EFFECT_MODE_OFF },
+    { ANDROID_CONTROL_EFFECT_MODE_MONO,       CAM_EFFECT_MODE_MONO },
+    { ANDROID_CONTROL_EFFECT_MODE_NEGATIVE,   CAM_EFFECT_MODE_NEGATIVE },
+    { ANDROID_CONTROL_EFFECT_MODE_SOLARIZE,   CAM_EFFECT_MODE_SOLARIZE },
+    { ANDROID_CONTROL_EFFECT_MODE_SEPIA,      CAM_EFFECT_MODE_SEPIA },
+    { ANDROID_CONTROL_EFFECT_MODE_POSTERIZE,  CAM_EFFECT_MODE_POSTERIZE },
+    { ANDROID_CONTROL_EFFECT_MODE_WHITEBOARD, CAM_EFFECT_MODE_WHITEBOARD },
+    { ANDROID_CONTROL_EFFECT_MODE_BLACKBOARD, CAM_EFFECT_MODE_BLACKBOARD },
+    { ANDROID_CONTROL_EFFECT_MODE_AQUA,       CAM_EFFECT_MODE_AQUA }
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_control_awb_mode_t,
+        cam_wb_mode_type> QCamera3HardwareInterface::WHITE_BALANCE_MODES_MAP[] = {
+    { ANDROID_CONTROL_AWB_MODE_OFF,             CAM_WB_MODE_OFF },
+    { ANDROID_CONTROL_AWB_MODE_AUTO,            CAM_WB_MODE_AUTO },
+    { ANDROID_CONTROL_AWB_MODE_INCANDESCENT,    CAM_WB_MODE_INCANDESCENT },
+    { ANDROID_CONTROL_AWB_MODE_FLUORESCENT,     CAM_WB_MODE_FLUORESCENT },
+    { ANDROID_CONTROL_AWB_MODE_WARM_FLUORESCENT,CAM_WB_MODE_WARM_FLUORESCENT},
+    { ANDROID_CONTROL_AWB_MODE_DAYLIGHT,        CAM_WB_MODE_DAYLIGHT },
+    { ANDROID_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT, CAM_WB_MODE_CLOUDY_DAYLIGHT },
+    { ANDROID_CONTROL_AWB_MODE_TWILIGHT,        CAM_WB_MODE_TWILIGHT },
+    { ANDROID_CONTROL_AWB_MODE_SHADE,           CAM_WB_MODE_SHADE }
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_control_scene_mode_t,
+        cam_scene_mode_type> QCamera3HardwareInterface::SCENE_MODES_MAP[] = {
+    { ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY,  CAM_SCENE_MODE_FACE_PRIORITY },
+    { ANDROID_CONTROL_SCENE_MODE_ACTION,         CAM_SCENE_MODE_ACTION },
+    { ANDROID_CONTROL_SCENE_MODE_PORTRAIT,       CAM_SCENE_MODE_PORTRAIT },
+    { ANDROID_CONTROL_SCENE_MODE_LANDSCAPE,      CAM_SCENE_MODE_LANDSCAPE },
+    { ANDROID_CONTROL_SCENE_MODE_NIGHT,          CAM_SCENE_MODE_NIGHT },
+    { ANDROID_CONTROL_SCENE_MODE_NIGHT_PORTRAIT, CAM_SCENE_MODE_NIGHT_PORTRAIT },
+    { ANDROID_CONTROL_SCENE_MODE_THEATRE,        CAM_SCENE_MODE_THEATRE },
+    { ANDROID_CONTROL_SCENE_MODE_BEACH,          CAM_SCENE_MODE_BEACH },
+    { ANDROID_CONTROL_SCENE_MODE_SNOW,           CAM_SCENE_MODE_SNOW },
+    { ANDROID_CONTROL_SCENE_MODE_SUNSET,         CAM_SCENE_MODE_SUNSET },
+    { ANDROID_CONTROL_SCENE_MODE_STEADYPHOTO,    CAM_SCENE_MODE_ANTISHAKE },
+    { ANDROID_CONTROL_SCENE_MODE_FIREWORKS ,     CAM_SCENE_MODE_FIREWORKS },
+    { ANDROID_CONTROL_SCENE_MODE_SPORTS ,        CAM_SCENE_MODE_SPORTS },
+    { ANDROID_CONTROL_SCENE_MODE_PARTY,          CAM_SCENE_MODE_PARTY },
+    { ANDROID_CONTROL_SCENE_MODE_CANDLELIGHT,    CAM_SCENE_MODE_CANDLELIGHT },
+    { ANDROID_CONTROL_SCENE_MODE_BARCODE,        CAM_SCENE_MODE_BARCODE}
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_control_af_mode_t,
+        cam_focus_mode_type> QCamera3HardwareInterface::FOCUS_MODES_MAP[] = {
+    { ANDROID_CONTROL_AF_MODE_OFF,                CAM_FOCUS_MODE_OFF },
+    { ANDROID_CONTROL_AF_MODE_OFF,                CAM_FOCUS_MODE_FIXED },
+    { ANDROID_CONTROL_AF_MODE_AUTO,               CAM_FOCUS_MODE_AUTO },
+    { ANDROID_CONTROL_AF_MODE_MACRO,              CAM_FOCUS_MODE_MACRO },
+    { ANDROID_CONTROL_AF_MODE_EDOF,               CAM_FOCUS_MODE_EDOF },
+    { ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE, CAM_FOCUS_MODE_CONTINOUS_PICTURE },
+    { ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO,   CAM_FOCUS_MODE_CONTINOUS_VIDEO }
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_color_correction_aberration_mode_t,
+        cam_aberration_mode_t> QCamera3HardwareInterface::COLOR_ABERRATION_MAP[] = {
+    { ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF,
+            CAM_COLOR_CORRECTION_ABERRATION_OFF },
+    { ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST,
+            CAM_COLOR_CORRECTION_ABERRATION_FAST },
+    { ANDROID_COLOR_CORRECTION_ABERRATION_MODE_HIGH_QUALITY,
+            CAM_COLOR_CORRECTION_ABERRATION_HIGH_QUALITY },
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_control_ae_antibanding_mode_t,
+        cam_antibanding_mode_type> QCamera3HardwareInterface::ANTIBANDING_MODES_MAP[] = {
+    { ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF,  CAM_ANTIBANDING_MODE_OFF },
+    { ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ, CAM_ANTIBANDING_MODE_50HZ },
+    { ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ, CAM_ANTIBANDING_MODE_60HZ },
+    { ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO, CAM_ANTIBANDING_MODE_AUTO }
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_control_ae_mode_t,
+        cam_flash_mode_t> QCamera3HardwareInterface::AE_FLASH_MODE_MAP[] = {
+    { ANDROID_CONTROL_AE_MODE_OFF,                  CAM_FLASH_MODE_OFF },
+    { ANDROID_CONTROL_AE_MODE_ON,                   CAM_FLASH_MODE_OFF },
+    { ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH,        CAM_FLASH_MODE_AUTO},
+    { ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH,      CAM_FLASH_MODE_ON  },
+    { ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE, CAM_FLASH_MODE_AUTO}
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_flash_mode_t,
+        cam_flash_mode_t> QCamera3HardwareInterface::FLASH_MODES_MAP[] = {
+    { ANDROID_FLASH_MODE_OFF,    CAM_FLASH_MODE_OFF  },
+    { ANDROID_FLASH_MODE_SINGLE, CAM_FLASH_MODE_SINGLE },
+    { ANDROID_FLASH_MODE_TORCH,  CAM_FLASH_MODE_TORCH }
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_statistics_face_detect_mode_t,
+        cam_face_detect_mode_t> QCamera3HardwareInterface::FACEDETECT_MODES_MAP[] = {
+    { ANDROID_STATISTICS_FACE_DETECT_MODE_OFF,    CAM_FACE_DETECT_MODE_OFF     },
+    { ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE, CAM_FACE_DETECT_MODE_SIMPLE  },
+    { ANDROID_STATISTICS_FACE_DETECT_MODE_FULL,   CAM_FACE_DETECT_MODE_FULL    }
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_lens_info_focus_distance_calibration_t,
+        cam_focus_calibration_t> QCamera3HardwareInterface::FOCUS_CALIBRATION_MAP[] = {
+    { ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_UNCALIBRATED,
+      CAM_FOCUS_UNCALIBRATED },
+    { ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_APPROXIMATE,
+      CAM_FOCUS_APPROXIMATE },
+    { ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_CALIBRATED,
+      CAM_FOCUS_CALIBRATED }
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_lens_state_t,
+        cam_af_lens_state_t> QCamera3HardwareInterface::LENS_STATE_MAP[] = {
+    { ANDROID_LENS_STATE_STATIONARY,    CAM_AF_LENS_STATE_STATIONARY},
+    { ANDROID_LENS_STATE_MOVING,        CAM_AF_LENS_STATE_MOVING}
+};
+
+const int32_t available_thumbnail_sizes[] = {0, 0,
+                                             176, 144,
+                                             320, 240,
+                                             432, 288,
+                                             480, 288,
+                                             512, 288,
+                                             512, 384};
+
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_sensor_test_pattern_mode_t,
+        cam_test_pattern_mode_t> QCamera3HardwareInterface::TEST_PATTERN_MAP[] = {
+    { ANDROID_SENSOR_TEST_PATTERN_MODE_OFF,          CAM_TEST_PATTERN_OFF   },
+    { ANDROID_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR,  CAM_TEST_PATTERN_SOLID_COLOR },
+    { ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS,   CAM_TEST_PATTERN_COLOR_BARS },
+    { ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS_FADE_TO_GRAY, CAM_TEST_PATTERN_COLOR_BARS_FADE_TO_GRAY },
+    { ANDROID_SENSOR_TEST_PATTERN_MODE_PN9,          CAM_TEST_PATTERN_PN9 },
+};
+
+/* Since there is no mapping for all the options, some Android enums are not listed.
+ * Also, the order in this list is important: when mapping from HAL to Android the
+ * lookup traverses from lower to higher index, so for HAL values that map to
+ * multiple Android values the traversal selects the first one found.
+ */
+const QCamera3HardwareInterface::QCameraMap<
+        camera_metadata_enum_android_sensor_reference_illuminant1_t,
+        cam_illuminat_t> QCamera3HardwareInterface::REFERENCE_ILLUMINANT_MAP[] = {
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_FLUORESCENT, CAM_AWB_WARM_FLO},
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_DAYLIGHT_FLUORESCENT, CAM_AWB_CUSTOM_DAYLIGHT },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_COOL_WHITE_FLUORESCENT, CAM_AWB_COLD_FLO },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_STANDARD_A, CAM_AWB_A },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D55, CAM_AWB_NOON },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D65, CAM_AWB_D65 },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D75, CAM_AWB_D75 },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D50, CAM_AWB_D50 },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_ISO_STUDIO_TUNGSTEN, CAM_AWB_CUSTOM_A},
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_DAYLIGHT, CAM_AWB_D50 },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_TUNGSTEN, CAM_AWB_A },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_FINE_WEATHER, CAM_AWB_D50 },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_CLOUDY_WEATHER, CAM_AWB_D65 },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_SHADE, CAM_AWB_D75 },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_DAY_WHITE_FLUORESCENT, CAM_AWB_CUSTOM_DAYLIGHT },
+    { ANDROID_SENSOR_REFERENCE_ILLUMINANT1_WHITE_FLUORESCENT, CAM_AWB_COLD_FLO},
+};
+
+const QCamera3HardwareInterface::QCameraMap<
+        int32_t, cam_hfr_mode_t> QCamera3HardwareInterface::HFR_MODE_MAP[] = {
+    { 60, CAM_HFR_MODE_60FPS},
+    { 90, CAM_HFR_MODE_90FPS},
+    { 120, CAM_HFR_MODE_120FPS},
+    { 150, CAM_HFR_MODE_150FPS},
+    { 180, CAM_HFR_MODE_180FPS},
+    { 210, CAM_HFR_MODE_210FPS},
+    { 240, CAM_HFR_MODE_240FPS},
+    { 480, CAM_HFR_MODE_480FPS},
+};
+
+camera3_device_ops_t QCamera3HardwareInterface::mCameraOps = {
+    .initialize =                         QCamera3HardwareInterface::initialize,
+    .configure_streams =                  QCamera3HardwareInterface::configure_streams,
+    .register_stream_buffers =            NULL,
+    .construct_default_request_settings = QCamera3HardwareInterface::construct_default_request_settings,
+    .process_capture_request =            QCamera3HardwareInterface::process_capture_request,
+    .get_metadata_vendor_tag_ops =        NULL,
+    .dump =                               QCamera3HardwareInterface::dump,
+    .flush =                              QCamera3HardwareInterface::flush,
+    .reserved =                           {0},
+};
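+
+/* The framework invokes these static entry points through
+ * camera3_device_t::ops; each of them recovers the per-instance object from
+ * device->priv (set in the constructor below), along the lines of:
+ *
+ *   QCamera3HardwareInterface *hw =
+ *           reinterpret_cast<QCamera3HardwareInterface *>(device->priv);
+ */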
+
+/*===========================================================================
+ * FUNCTION   : QCamera3HardwareInterface
+ *
+ * DESCRIPTION: constructor of QCamera3HardwareInterface
+ *
+ * PARAMETERS :
+ *   @cameraId  : camera ID
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCamera3HardwareInterface::QCamera3HardwareInterface(uint32_t cameraId,
+        const camera_module_callbacks_t *callbacks)
+    : mCameraId(cameraId),
+      mCameraHandle(NULL),
+      mCameraOpened(false),
+      mCameraInitialized(false),
+      mCallbackOps(NULL),
+      mMetadataChannel(NULL),
+      mPictureChannel(NULL),
+      mRawChannel(NULL),
+      mSupportChannel(NULL),
+      mAnalysisChannel(NULL),
+      mRawDumpChannel(NULL),
+      mDummyBatchChannel(NULL),
+      mChannelHandle(0),
+      mFirstRequest(false),
+      mFirstConfiguration(true),
+      mFlush(false),
+      mParamHeap(NULL),
+      mParameters(NULL),
+      mPrevParameters(NULL),
+      m_bIsVideo(false),
+      m_bIs4KVideo(false),
+      m_bEisSupportedSize(false),
+      m_bEisEnable(false),
+      m_MobicatMask(0),
+      mMinProcessedFrameDuration(0),
+      mMinJpegFrameDuration(0),
+      mMinRawFrameDuration(0),
+      mMetaFrameCount(0U),
+      mUpdateDebugLevel(false),
+      mCallbacks(callbacks),
+      mCaptureIntent(0),
+      mBatchSize(0),
+      mToBeQueuedVidBufs(0),
+      mHFRVideoFps(DEFAULT_VIDEO_FPS),
+      mOpMode(CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE),
+      mFirstFrameNumberInBatch(0),
+      mNeedSensorRestart(false),
+      mLdafCalibExist(false),
+      mPowerHintEnabled(false),
+      mLastCustIntentFrmNum(-1)
+{
+    getLogLevel();
+    m_perfLock.lock_init();
+    mCameraDevice.common.tag = HARDWARE_DEVICE_TAG;
+    mCameraDevice.common.version = CAMERA_DEVICE_API_VERSION_3_3;
+    mCameraDevice.common.close = close_camera_device;
+    mCameraDevice.ops = &mCameraOps;
+    mCameraDevice.priv = this;
+    gCamCapability[cameraId]->version = CAM_HAL_V3;
+    // TODO: hardcode for now until mctl adds support for min_num_pp_bufs
+    // TBD: check whether this hardcoding is needed, e.g. by printing whether mctl fills this in as 3
+    gCamCapability[cameraId]->min_num_pp_bufs = 3;
+    pthread_cond_init(&mRequestCond, NULL);
+    mPendingLiveRequest = 0;
+    mCurrentRequestId = -1;
+    pthread_mutex_init(&mMutex, NULL);
+
+    for (size_t i = 0; i < CAMERA3_TEMPLATE_COUNT; i++)
+        mDefaultMetadata[i] = NULL;
+
+    // Getting system props of different kinds
+    char prop[PROPERTY_VALUE_MAX];
+    memset(prop, 0, sizeof(prop));
+    property_get("persist.camera.raw.dump", prop, "0");
+    mEnableRawDump = atoi(prop);
+    if (mEnableRawDump)
+        CDBG("%s: Raw dump from Camera HAL enabled", __func__);
+
+    memset(&mInputStreamInfo, 0, sizeof(mInputStreamInfo));
+    memset(mLdafCalib, 0, sizeof(mLdafCalib));
+
+    memset(prop, 0, sizeof(prop));
+    property_get("persist.camera.tnr.preview", prop, "1");
+    m_bTnrPreview = (uint8_t)atoi(prop);
+
+    memset(prop, 0, sizeof(prop));
+    property_get("persist.camera.tnr.video", prop, "1");
+    m_bTnrVideo = (uint8_t)atoi(prop);
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCamera3HardwareInterface
+ *
+ * DESCRIPTION: destructor of QCamera3HardwareInterface
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCamera3HardwareInterface::~QCamera3HardwareInterface()
+{
+    CDBG("%s: E", __func__);
+    bool hasPendingBuffers = (mPendingBuffersMap.num_buffers > 0);
+
+    /* Turn off current power hint before acquiring perfLock in case they
+     * conflict with each other */
+    disablePowerHint();
+
+    m_perfLock.lock_acq();
+
+    /* We need to stop all streams before deleting any stream */
+    if (mRawDumpChannel) {
+        mRawDumpChannel->stop();
+    }
+
+    // NOTE: 'camera3_stream_t *' objects are already freed at
+    //        this stage by the framework
+    for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
+        it != mStreamInfo.end(); it++) {
+        QCamera3ProcessingChannel *channel = (*it)->channel;
+        if (channel) {
+            channel->stop();
+        }
+    }
+    if (mSupportChannel)
+        mSupportChannel->stop();
+
+    if (mAnalysisChannel) {
+        mAnalysisChannel->stop();
+    }
+    if (mMetadataChannel) {
+        mMetadataChannel->stop();
+    }
+    if (mChannelHandle) {
+        mCameraHandle->ops->stop_channel(mCameraHandle->camera_handle,
+                mChannelHandle);
+        ALOGI("%s: stopping channel %d", __func__, mChannelHandle);
+    }
+
+    for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
+        it != mStreamInfo.end(); it++) {
+        QCamera3ProcessingChannel *channel = (*it)->channel;
+        if (channel)
+            delete channel;
+        free (*it);
+    }
+    if (mSupportChannel) {
+        delete mSupportChannel;
+        mSupportChannel = NULL;
+    }
+
+    if (mAnalysisChannel) {
+        delete mAnalysisChannel;
+        mAnalysisChannel = NULL;
+    }
+    if (mRawDumpChannel) {
+        delete mRawDumpChannel;
+        mRawDumpChannel = NULL;
+    }
+    if (mDummyBatchChannel) {
+        delete mDummyBatchChannel;
+        mDummyBatchChannel = NULL;
+    }
+    mPictureChannel = NULL;
+
+    if (mMetadataChannel) {
+        delete mMetadataChannel;
+        mMetadataChannel = NULL;
+    }
+
+    /* Clean up all channels */
+    if (mCameraInitialized) {
+        if(!mFirstConfiguration){
+            clear_metadata_buffer(mParameters);
+
+            // Check if there is still pending buffer not yet returned.
+            if (hasPendingBuffers) {
+                uint8_t restart = TRUE;
+                ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_META_DAEMON_RESTART,
+                        restart);
+            }
+
+            //send the last unconfigure
+            cam_stream_size_info_t stream_config_info;
+            memset(&stream_config_info, 0, sizeof(cam_stream_size_info_t));
+            stream_config_info.buffer_info.min_buffers = MIN_INFLIGHT_REQUESTS;
+            stream_config_info.buffer_info.max_buffers =
+                    m_bIs4KVideo ? 0 : MAX_INFLIGHT_REQUESTS;
+            ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_META_STREAM_INFO,
+                    stream_config_info);
+
+            int rc = mCameraHandle->ops->set_parms(mCameraHandle->camera_handle, mParameters);
+            if (rc < 0) {
+                ALOGE("%s: set_parms failed for unconfigure", __func__);
+            }
+        }
+        deinitParameters();
+    }
+
+    if (mChannelHandle) {
+        mCameraHandle->ops->delete_channel(mCameraHandle->camera_handle,
+                mChannelHandle);
+        ALOGE("%s: deleting channel %d", __func__, mChannelHandle);
+        mChannelHandle = 0;
+    }
+
+    if (mCameraOpened)
+        closeCamera();
+
+    mPendingBuffersMap.mPendingBufferList.clear();
+    mPendingReprocessResultList.clear();
+    for (pendingRequestIterator i = mPendingRequestsList.begin();
+            i != mPendingRequestsList.end();) {
+        i = erasePendingRequest(i);
+    }
+    for (size_t i = 0; i < CAMERA3_TEMPLATE_COUNT; i++)
+        if (mDefaultMetadata[i])
+            free_camera_metadata(mDefaultMetadata[i]);
+
+    m_perfLock.lock_rel();
+    m_perfLock.lock_deinit();
+
+    pthread_cond_destroy(&mRequestCond);
+
+    pthread_mutex_destroy(&mMutex);
+
+    if (hasPendingBuffers) {
+        ALOGE("%s: Not all buffers were returned. Notified the camera daemon process to restart."
+                " Exiting here...", __func__);
+        exit(EXIT_FAILURE);
+    }
+    CDBG("%s: X", __func__);
+}
+
+/*===========================================================================
+ * FUNCTION   : erasePendingRequest
+ *
+ * DESCRIPTION: function to erase a desired pending request after freeing any
+ *              allocated memory
+ *
+ * PARAMETERS :
+ *   @i       : iterator pointing to pending request to be erased
+ *
+ * RETURN     : iterator pointing to the next request
+ *==========================================================================*/
+QCamera3HardwareInterface::pendingRequestIterator
+        QCamera3HardwareInterface::erasePendingRequest (pendingRequestIterator i)
+{
+    if (i->input_buffer != NULL) {
+        free(i->input_buffer);
+        i->input_buffer = NULL;
+    }
+    if (i->settings != NULL)
+        free_camera_metadata((camera_metadata_t*)i->settings);
+    return mPendingRequestsList.erase(i);
+}
+
+/*===========================================================================
+ * FUNCTION   : camEvtHandle
+ *
+ * DESCRIPTION: Function registered to mm-camera-interface to handle events
+ *
+ * PARAMETERS :
+ *   @camera_handle : interface layer camera handle
+ *   @evt           : ptr to event
+ *   @user_data     : user data ptr
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera3HardwareInterface::camEvtHandle(uint32_t /*camera_handle*/,
+                                          mm_camera_event_t *evt,
+                                          void *user_data)
+{
+    QCamera3HardwareInterface *obj = (QCamera3HardwareInterface *)user_data;
+    if (obj && evt) {
+        switch(evt->server_event_type) {
+            case CAM_EVENT_TYPE_DAEMON_DIED:
+                ALOGE("%s: Fatal, camera daemon died", __func__);
+                //close the camera backend
+                if (obj->mCameraHandle && obj->mCameraHandle->camera_handle
+                        && obj->mCameraHandle->ops) {
+                    obj->mCameraHandle->ops->error_close_camera(obj->mCameraHandle->camera_handle);
+                } else {
+                    ALOGE("%s: Could not close camera on error because the handle or ops is NULL",
+                            __func__);
+                }
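+                // Notify the framework of a fatal device error so it can tear
+                // down the camera session.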
+                camera3_notify_msg_t notify_msg;
+                memset(&notify_msg, 0, sizeof(camera3_notify_msg_t));
+                notify_msg.type = CAMERA3_MSG_ERROR;
+                notify_msg.message.error.error_code = CAMERA3_MSG_ERROR_DEVICE;
+                notify_msg.message.error.error_stream = NULL;
+                notify_msg.message.error.frame_number = 0;
+                obj->mCallbackOps->notify(obj->mCallbackOps, &notify_msg);
+                break;
+
+            case CAM_EVENT_TYPE_DAEMON_PULL_REQ:
+                CDBG("%s: HAL got request pull from Daemon", __func__);
+                pthread_mutex_lock(&obj->mMutex);
+                obj->mWokenUpByDaemon = true;
+                obj->unblockRequestIfNecessary();
+                pthread_mutex_unlock(&obj->mMutex);
+                break;
+
+            default:
+                CDBG_HIGH("%s: Warning: Unhandled event %d", __func__,
+                        evt->server_event_type);
+                break;
+        }
+    } else {
+        ALOGE("%s: NULL user_data/evt", __func__);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : openCamera
+ *
+ * DESCRIPTION: open camera
+ *
+ * PARAMETERS :
+ *   @hw_device  : double ptr for camera device struct
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3HardwareInterface::openCamera(struct hw_device_t **hw_device)
+{
+    int rc = 0;
+    if (mCameraOpened) {
+        *hw_device = NULL;
+        return PERMISSION_DENIED;
+    }
+    m_perfLock.lock_acq();
+    rc = openCamera();
+    if (rc == 0) {
+        *hw_device = &mCameraDevice.common;
+    } else
+        *hw_device = NULL;
+
+    m_perfLock.lock_rel();
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : openCamera
+ *
+ * DESCRIPTION: open camera
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3HardwareInterface::openCamera()
+{
+    int rc = 0;
+
+    ATRACE_CALL();
+    if (mCameraHandle) {
+        ALOGE("Failure: Camera already opened");
+        return ALREADY_EXISTS;
+    }
+
+    rc = QCameraFlash::getInstance().reserveFlashForCamera(mCameraId);
+    if (rc < 0) {
+        ALOGE("%s: Failed to reserve flash for camera id: %d",
+                __func__,
+                mCameraId);
+        return UNKNOWN_ERROR;
+    }
+
+    rc = camera_open((uint8_t)mCameraId, &mCameraHandle);
+    if (rc) {
+        ALOGE("camera_open failed. rc = %d, mCameraHandle = %p", rc, mCameraHandle);
+        return rc;
+    }
+
+    mCameraOpened = true;
+
+    rc = mCameraHandle->ops->register_event_notify(mCameraHandle->camera_handle,
+            camEvtHandle, (void *)this);
+
+    if (rc < 0) {
+        ALOGE("%s: Error, failed to register event callback", __func__);
+        /* Not closing camera here since it is already handled in destructor */
+        return FAILED_TRANSACTION;
+    }
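+    /* The next configure_streams() call will be the first one after this open */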
+    mFirstConfiguration = true;
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : closeCamera
+ *
+ * DESCRIPTION: close camera
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3HardwareInterface::closeCamera()
+{
+    ATRACE_CALL();
+    int rc = NO_ERROR;
+
+    rc = mCameraHandle->ops->close_camera(mCameraHandle->camera_handle);
+    mCameraHandle = NULL;
+    mCameraOpened = false;
+
+    if (QCameraFlash::getInstance().releaseFlashFromCamera(mCameraId) != 0) {
+        CDBG("%s: Failed to release flash for camera id: %d",
+                __func__,
+                mCameraId);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : initialize
+ *
+ * DESCRIPTION: Initialize frameworks callback functions
+ *
+ * PARAMETERS :
+ *   @callback_ops : callback function to frameworks
+ *
+ * RETURN     :
+ *
+ *==========================================================================*/
+int QCamera3HardwareInterface::initialize(
+        const struct camera3_callback_ops *callback_ops)
+{
+    ATRACE_CALL();
+    int rc;
+
+    pthread_mutex_lock(&mMutex);
+
+    rc = initParameters();
+    if (rc < 0) {
+        ALOGE("%s: initParamters failed %d", __func__, rc);
+       goto err1;
+    }
+    mCallbackOps = callback_ops;
+
+    mChannelHandle = mCameraHandle->ops->add_channel(
+            mCameraHandle->camera_handle, NULL, NULL, this);
+    if (mChannelHandle == 0) {
+        ALOGE("%s: add_channel failed", __func__);
+        rc = -ENOMEM;
+        pthread_mutex_unlock(&mMutex);
+        return rc;
+    }
+
+    pthread_mutex_unlock(&mMutex);
+    mCameraInitialized = true;
+    return 0;
+
+err1:
+    pthread_mutex_unlock(&mMutex);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : validateStreamDimensions
+ *
+ * DESCRIPTION: Check if the configuration requested are those advertised
+ *
+ * PARAMETERS :
+ *   @stream_list : streams to be configured
+ *
+ * RETURN     :
+ *
+ *==========================================================================*/
+int QCamera3HardwareInterface::validateStreamDimensions(
+        camera3_stream_configuration_t *streamList)
+{
+    int rc = NO_ERROR;
+    int32_t available_processed_sizes[MAX_SIZES_CNT * 2];
+    int32_t available_jpeg_sizes[MAX_SIZES_CNT * 2];
+    size_t count = 0;
+
+    camera3_stream_t *inputStream = NULL;
+    /*
+     * Loop through all streams to find input stream if it exists.
+     */
+    for (size_t i = 0; i< streamList->num_streams; i++) {
+        if (streamList->streams[i]->stream_type == CAMERA3_STREAM_INPUT) {
+            if (inputStream != NULL) {
+                ALOGE("%s: Error, Multiple input streams requested");
+                return -EINVAL;
+            }
+            inputStream = streamList->streams[i];
+        }
+    }
+    /*
+     * Loop through all streams requested in configuration.
+     * Check if unsupported sizes have been requested on any of them.
+     */
+    for (size_t j = 0; j < streamList->num_streams; j++) {
+        bool sizeFound = false;
+        size_t jpeg_sizes_cnt = 0;
+        camera3_stream_t *newStream = streamList->streams[j];
+
+        uint32_t rotatedHeight = newStream->height;
+        uint32_t rotatedWidth = newStream->width;
+        if ((newStream->rotation == CAMERA3_STREAM_ROTATION_90) ||
+                (newStream->rotation == CAMERA3_STREAM_ROTATION_270)) {
+            rotatedHeight = newStream->width;
+            rotatedWidth = newStream->height;
+        }
+
+        /*
+         * Sizes differ for each stream format; check against the
+         * appropriate table.
+         */
+        switch (newStream->format) {
+        case ANDROID_SCALER_AVAILABLE_FORMATS_RAW16:
+        case ANDROID_SCALER_AVAILABLE_FORMATS_RAW_OPAQUE:
+        case HAL_PIXEL_FORMAT_RAW10:
+            count = MIN(gCamCapability[mCameraId]->supported_raw_dim_cnt, MAX_SIZES_CNT);
+            for (size_t i = 0; i < count; i++) {
+                if ((gCamCapability[mCameraId]->raw_dim[i].width == (int32_t)rotatedWidth) &&
+                        (gCamCapability[mCameraId]->raw_dim[i].height == (int32_t)rotatedHeight)) {
+                    sizeFound = true;
+                    break;
+                }
+            }
+            break;
+        case HAL_PIXEL_FORMAT_BLOB:
+            count = MIN(gCamCapability[mCameraId]->picture_sizes_tbl_cnt, MAX_SIZES_CNT);
+            /* Generate JPEG sizes table */
+            makeTable(gCamCapability[mCameraId]->picture_sizes_tbl,
+                    count,
+                    MAX_SIZES_CNT,
+                    available_processed_sizes);
+            jpeg_sizes_cnt = filterJpegSizes(
+                    available_jpeg_sizes,
+                    available_processed_sizes,
+                    count * 2,
+                    MAX_SIZES_CNT * 2,
+                    gCamCapability[mCameraId]->active_array_size,
+                    gCamCapability[mCameraId]->max_downscale_factor);
+
+            /* Verify set size against generated sizes table */
+            for (size_t i = 0; i < (jpeg_sizes_cnt / 2); i++) {
+                if (((int32_t)rotatedWidth == available_jpeg_sizes[i*2]) &&
+                        ((int32_t)rotatedHeight == available_jpeg_sizes[i*2+1])) {
+                    sizeFound = true;
+                    break;
+                }
+            }
+            break;
+        case HAL_PIXEL_FORMAT_YCbCr_420_888:
+        case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
+        default:
+            if (newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL
+                    || newStream->stream_type == CAMERA3_STREAM_INPUT
+                    || IS_USAGE_ZSL(newStream->usage)) {
+                if (((int32_t)rotatedWidth ==
+                                gCamCapability[mCameraId]->active_array_size.width) &&
+                                ((int32_t)rotatedHeight ==
+                                gCamCapability[mCameraId]->active_array_size.height)) {
+                    sizeFound = true;
+                    break;
+                }
+                /* We could potentially break here to enforce that a ZSL stream
+                 * set by the framework is always full active-array size, but it
+                 * is not clear from the spec whether the framework will always
+                 * follow that. We also have logic to override to the full array
+                 * size, so keep the logic lenient for now.
+                 */
+            }
+            count = MIN(gCamCapability[mCameraId]->picture_sizes_tbl_cnt,
+                    MAX_SIZES_CNT);
+            for (size_t i = 0; i < count; i++) {
+                if (((int32_t)rotatedWidth ==
+                            gCamCapability[mCameraId]->picture_sizes_tbl[i].width) &&
+                            ((int32_t)rotatedHeight ==
+                            gCamCapability[mCameraId]->picture_sizes_tbl[i].height)) {
+                    sizeFound = true;
+                    break;
+                }
+            }
+            break;
+        } /* End of switch(newStream->format) */
+
+        /* We error out even if a single stream has unsupported size set */
+        if (!sizeFound) {
+            ALOGE("%s: Error: Unsupported size of  %d x %d requested for stream"
+                  "type:%d", __func__, rotatedWidth, rotatedHeight,
+                  newStream->format);
+            ALOGE("%s: Active array size is  %d x %d", __func__,
+                    gCamCapability[mCameraId]->active_array_size.width,
+                    gCamCapability[mCameraId]->active_array_size.height);
+            rc = -EINVAL;
+            break;
+        }
+    } /* End of for each stream */
+    return rc;
+}
+
+/*==============================================================================
+ * FUNCTION   : isSupportChannelNeeded
+ *
+ * DESCRIPTION: Simple heuristic to determine if a support channel is needed
+ *
+ * PARAMETERS :
+ *   @stream_list : streams to be configured
+ *   @stream_config_info : the config info for streams to be configured
+ *
+ * RETURN     : Boolean true/false decision
+ *
+ *==========================================================================*/
+bool QCamera3HardwareInterface::isSupportChannelNeeded(
+        camera3_stream_configuration_t *streamList,
+        cam_stream_size_info_t stream_config_info)
+{
+    uint32_t i;
+    bool pprocRequested = false;
+    /* Check for conditions where PProc pipeline does not have any streams*/
+    for (i = 0; i < stream_config_info.num_streams; i++) {
+        if (stream_config_info.type[i] != CAM_STREAM_TYPE_ANALYSIS &&
+                stream_config_info.postprocess_mask[i] != CAM_QCOM_FEATURE_NONE) {
+            pprocRequested = true;
+            break;
+        }
+    }
+
+    if (pprocRequested == false )
+        return true;
+
+    /* Dummy stream needed if only raw or jpeg streams present */
+    for (i = 0; i < streamList->num_streams; i++) {
+        switch(streamList->streams[i]->format) {
+            case HAL_PIXEL_FORMAT_RAW_OPAQUE:
+            case HAL_PIXEL_FORMAT_RAW10:
+            case HAL_PIXEL_FORMAT_RAW16:
+            case HAL_PIXEL_FORMAT_BLOB:
+                break;
+            default:
+                return false;
+        }
+    }
+    return true;
+}
+
+/*==============================================================================
+ * FUNCTION   : getSensorOutputSize
+ *
+ * DESCRIPTION: Get sensor output size based on current stream configuration
+ *
+ * PARAMETERS :
+ *   @sensor_dim : sensor output dimension (output)
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ *==========================================================================*/
+int32_t QCamera3HardwareInterface::getSensorOutputSize(cam_dimension_t &sensor_dim)
+{
+    int32_t rc = NO_ERROR;
+
+    cam_dimension_t max_dim = {0, 0};
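+    /* Use the largest width and height across the configured streams as the
+     * max-dimension hint to the backend before querying the RAW (sensor
+     * output) dimension. */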
+    for (uint32_t i = 0; i < mStreamConfigInfo.num_streams; i++) {
+        if (mStreamConfigInfo.stream_sizes[i].width > max_dim.width)
+            max_dim.width = mStreamConfigInfo.stream_sizes[i].width;
+        if (mStreamConfigInfo.stream_sizes[i].height > max_dim.height)
+            max_dim.height = mStreamConfigInfo.stream_sizes[i].height;
+    }
+
+    clear_metadata_buffer(mParameters);
+
+    rc = ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_PARM_MAX_DIMENSION,
+            max_dim);
+    if (rc != NO_ERROR) {
+        ALOGE("%s:Failed to update table for CAM_INTF_PARM_MAX_DIMENSION", __func__);
+        return rc;
+    }
+
+    rc = mCameraHandle->ops->set_parms(mCameraHandle->camera_handle, mParameters);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: Failed to set CAM_INTF_PARM_MAX_DIMENSION", __func__);
+        return rc;
+    }
+
+    clear_metadata_buffer(mParameters);
+    ADD_GET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_PARM_RAW_DIMENSION);
+
+    rc = mCameraHandle->ops->get_parms(mCameraHandle->camera_handle,
+            mParameters);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: Failed to get CAM_INTF_PARM_RAW_DIMENSION", __func__);
+        return rc;
+    }
+
+    READ_PARAM_ENTRY(mParameters, CAM_INTF_PARM_RAW_DIMENSION, sensor_dim);
+    ALOGI("%s: sensor output dimension = %d x %d", __func__, sensor_dim.width, sensor_dim.height);
+
+    return rc;
+}
+
+/*==============================================================================
+ * FUNCTION   : enablePowerHint
+ *
+ * DESCRIPTION: enable single powerhint for preview and different video modes.
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : none
+ *
+ *==========================================================================*/
+void QCamera3HardwareInterface::enablePowerHint()
+{
+    if (!mPowerHintEnabled) {
+        m_perfLock.powerHint(POWER_HINT_VIDEO_ENCODE, 1);
+        mPowerHintEnabled = true;
+    }
+}
+
+/*==============================================================================
+ * FUNCTION   : disablePowerHint
+ *
+ * DESCRIPTION: disable current powerhint.
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : none
+ *
+ *==========================================================================*/
+void QCamera3HardwareInterface::disablePowerHint()
+{
+    if (mPowerHintEnabled) {
+        m_perfLock.powerHint(POWER_HINT_VIDEO_ENCODE, 0);
+        mPowerHintEnabled = false;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : configureStreams
+ *
+ * DESCRIPTION: Reset HAL camera device processing pipeline and set up new input
+ *              and output streams.
+ *
+ * PARAMETERS :
+ *   @stream_list : streams to be configured
+ *
+ * RETURN     :
+ *
+ *==========================================================================*/
+int QCamera3HardwareInterface::configureStreams(
+        camera3_stream_configuration_t *streamList)
+{
+    ATRACE_CALL();
+    int rc = 0;
+
+    // Acquire perfLock before configure streams
+    m_perfLock.lock_acq();
+    rc = configureStreamsPerfLocked(streamList);
+    m_perfLock.lock_rel();
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : configureStreamsPerfLocked
+ *
+ * DESCRIPTION: configureStreams while perfLock is held.
+ *
+ * PARAMETERS :
+ *   @stream_list : streams to be configured
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3HardwareInterface::configureStreamsPerfLocked(
+        camera3_stream_configuration_t *streamList)
+{
+    ATRACE_CALL();
+    int rc = 0;
+
+    // Sanity check stream_list
+    if (streamList == NULL) {
+        ALOGE("%s: NULL stream configuration", __func__);
+        return BAD_VALUE;
+    }
+    if (streamList->streams == NULL) {
+        ALOGE("%s: NULL stream list", __func__);
+        return BAD_VALUE;
+    }
+
+    if (streamList->num_streams < 1) {
+        ALOGE("%s: Bad number of streams requested: %d", __func__,
+                streamList->num_streams);
+        return BAD_VALUE;
+    }
+
+    if (streamList->num_streams >= MAX_NUM_STREAMS) {
+        ALOGE("%s: Maximum number of streams %d exceeded: %d", __func__,
+                MAX_NUM_STREAMS, streamList->num_streams);
+        return BAD_VALUE;
+    }
+
+    mOpMode = streamList->operation_mode;
+    CDBG("%s: mOpMode: %d", __func__, mOpMode);
+
+    /* First invalidate all the streams in mStreamInfo;
+     * if they appear again, they will be validated */
+    for (List<stream_info_t*>::iterator it = mStreamInfo.begin();
+            it != mStreamInfo.end(); it++) {
+        QCamera3ProcessingChannel *channel = (QCamera3ProcessingChannel*)(*it)->stream->priv;
+        if (channel) {
+          channel->stop();
+        }
+        (*it)->status = INVALID;
+    }
+
+    if (mRawDumpChannel) {
+        mRawDumpChannel->stop();
+        delete mRawDumpChannel;
+        mRawDumpChannel = NULL;
+    }
+
+    if (mSupportChannel)
+        mSupportChannel->stop();
+
+    if (mAnalysisChannel) {
+        mAnalysisChannel->stop();
+    }
+    if (mMetadataChannel) {
+        /* If mStreamInfo is not empty, there is a metadata stream */
+        mMetadataChannel->stop();
+    }
+    if (mChannelHandle) {
+        mCameraHandle->ops->stop_channel(mCameraHandle->camera_handle,
+                mChannelHandle);
+        ALOGI("%s: stopping channel %d", __func__, mChannelHandle);
+    }
+
+    pthread_mutex_lock(&mMutex);
+
+    /* Check whether we have video stream */
+    m_bIs4KVideo = false;
+    m_bIsVideo = false;
+    m_bEisSupportedSize = false;
+    m_bTnrEnabled = false;
+    bool isZsl = false;
+    uint32_t videoWidth = 0U;
+    uint32_t videoHeight = 0U;
+    size_t rawStreamCnt = 0;
+    size_t stallStreamCnt = 0;
+    size_t processedStreamCnt = 0;
+    // Number of streams on ISP encoder path
+    size_t numStreamsOnEncoder = 0;
+    size_t numYuv888OnEncoder = 0;
+    bool bYuv888OverrideJpeg = false;
+    cam_dimension_t largeYuv888Size = {0, 0};
+    cam_dimension_t maxViewfinderSize = {0, 0};
+    bool bJpegExceeds4K = false;
+    bool bJpegOnEncoder = false;
+    bool bUseCommonFeatureMask = false;
+    uint32_t commonFeatureMask = 0;
+    maxViewfinderSize = gCamCapability[mCameraId]->max_viewfinder_size;
+    camera3_stream_t *inputStream = NULL;
+    bool isJpeg = false;
+    cam_dimension_t jpegSize = {0, 0};
+
+    /*EIS configuration*/
+    bool eisSupported = false;
+    bool oisSupported = false;
+    int32_t margin_index = -1;
+    uint8_t eis_prop_set;
+    uint32_t maxEisWidth = 0;
+    uint32_t maxEisHeight = 0;
+    int32_t hal_version = CAM_HAL_V3;
+
+    memset(&mInputStreamInfo, 0, sizeof(mInputStreamInfo));
+
+    size_t count = IS_TYPE_MAX;
+    count = MIN(gCamCapability[mCameraId]->supported_is_types_cnt, count);
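+    /* EIS is treated as supported only if the backend advertises IS_TYPE_EIS_2_0 */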
+    for (size_t i = 0; i < count; i++) {
+        if (gCamCapability[mCameraId]->supported_is_types[i] == IS_TYPE_EIS_2_0) {
+            eisSupported = true;
+            margin_index = (int32_t)i;
+            break;
+        }
+    }
+
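+    /* OIS is treated as supported if any advertised optical stabilization mode is ON */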
+    count = CAM_OPT_STAB_MAX;
+    count = MIN(gCamCapability[mCameraId]->optical_stab_modes_count, count);
+    for (size_t i = 0; i < count; i++) {
+        if (gCamCapability[mCameraId]->optical_stab_modes[i] ==  CAM_OPT_STAB_ON) {
+            oisSupported = true;
+            break;
+        }
+    }
+
+    if (eisSupported) {
+        maxEisWidth = MAX_EIS_WIDTH;
+        maxEisHeight = MAX_EIS_HEIGHT;
+    }
+
+    /* EIS setprop control */
+    char eis_prop[PROPERTY_VALUE_MAX];
+    memset(eis_prop, 0, sizeof(eis_prop));
+    property_get("persist.camera.eis.enable", eis_prop, "0");
+    eis_prop_set = (uint8_t)atoi(eis_prop);
+
+    m_bEisEnable = eis_prop_set && (!oisSupported && eisSupported) &&
+            (mOpMode != CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE);
+
+    /* stream configurations */
+    for (size_t i = 0; i < streamList->num_streams; i++) {
+        camera3_stream_t *newStream = streamList->streams[i];
+        ALOGI("%s: stream[%d] type = %d, format = %d, width = %d, "
+                "height = %d, rotation = %d, usage = 0x%x",
+                __func__, i, newStream->stream_type, newStream->format,
+                newStream->width, newStream->height, newStream->rotation,
+                newStream->usage);
+        if (newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL ||
+                newStream->stream_type == CAMERA3_STREAM_INPUT){
+            isZsl = true;
+        }
+        if (newStream->stream_type == CAMERA3_STREAM_INPUT){
+            inputStream = newStream;
+        }
+
+        if (newStream->format == HAL_PIXEL_FORMAT_BLOB) {
+            isJpeg = true;
+            jpegSize.width = newStream->width;
+            jpegSize.height = newStream->height;
+            if (newStream->width > VIDEO_4K_WIDTH ||
+                    newStream->height > VIDEO_4K_HEIGHT)
+                bJpegExceeds4K = true;
+        }
+
+        if ((HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED == newStream->format) &&
+                (newStream->usage & private_handle_t::PRIV_FLAGS_VIDEO_ENCODER)) {
+            m_bIsVideo = true;
+            videoWidth = newStream->width;
+            videoHeight = newStream->height;
+            if ((VIDEO_4K_WIDTH <= newStream->width) &&
+                    (VIDEO_4K_HEIGHT <= newStream->height)) {
+                m_bIs4KVideo = true;
+            }
+            m_bEisSupportedSize = (newStream->width <= maxEisWidth) &&
+                                  (newStream->height <= maxEisHeight);
+        }
+        if (newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL ||
+                newStream->stream_type == CAMERA3_STREAM_OUTPUT) {
+            switch (newStream->format) {
+            case HAL_PIXEL_FORMAT_BLOB:
+                stallStreamCnt++;
+                if (isOnEncoder(maxViewfinderSize, newStream->width,
+                        newStream->height)) {
+                    commonFeatureMask |= CAM_QCOM_FEATURE_NONE;
+                    numStreamsOnEncoder++;
+                    bJpegOnEncoder = true;
+                }
+                break;
+            case HAL_PIXEL_FORMAT_RAW10:
+            case HAL_PIXEL_FORMAT_RAW_OPAQUE:
+            case HAL_PIXEL_FORMAT_RAW16:
+                rawStreamCnt++;
+                break;
+            case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
+                processedStreamCnt++;
+                if (isOnEncoder(maxViewfinderSize, newStream->width,
+                        newStream->height)) {
+                    if (newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL ||
+                            IS_USAGE_ZSL(newStream->usage)) {
+                        commonFeatureMask |= CAM_QCOM_FEATURE_NONE;
+                    } else {
+                        commonFeatureMask |= CAM_QCOM_FEATURE_PP_SUPERSET_HAL3;
+                    }
+                    numStreamsOnEncoder++;
+                }
+                break;
+            case HAL_PIXEL_FORMAT_YCbCr_420_888:
+                processedStreamCnt++;
+                if (isOnEncoder(maxViewfinderSize, newStream->width,
+                        newStream->height)) {
+                    // If the Yuv888 size is not greater than 4K, set the feature
+                    // mask to SUPERSET so that it supports concurrent requests on
+                    // YUV and JPEG.
+                    if (newStream->width <= VIDEO_4K_WIDTH &&
+                            newStream->height <= VIDEO_4K_HEIGHT) {
+                        commonFeatureMask |= CAM_QCOM_FEATURE_PP_SUPERSET_HAL3;
+                    } else {
+                        commonFeatureMask |= CAM_QCOM_FEATURE_NONE;
+                    }
+                    numStreamsOnEncoder++;
+                    numYuv888OnEncoder++;
+                    largeYuv888Size.width = newStream->width;
+                    largeYuv888Size.height = newStream->height;
+                }
+                break;
+            default:
+                processedStreamCnt++;
+                if (isOnEncoder(maxViewfinderSize, newStream->width,
+                        newStream->height)) {
+                    commonFeatureMask |= CAM_QCOM_FEATURE_PP_SUPERSET_HAL3;
+                    numStreamsOnEncoder++;
+                }
+                break;
+            }
+
+        }
+    }
+
+    if (gCamCapability[mCameraId]->position == CAM_POSITION_FRONT ||
+        !m_bIsVideo) {
+        m_bEisEnable = false;
+    }
+
+    /* Logic to enable/disable TNR based on specific config size/etc.*/
+    if ((m_bTnrPreview || m_bTnrVideo) && m_bIsVideo &&
+            ((videoWidth == 1920 && videoHeight == 1080) ||
+            (videoWidth == 1280 && videoHeight == 720)) &&
+            (mOpMode != CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE))
+        m_bTnrEnabled = true;
+
+    /* Check if num_streams is sane */
+    if (stallStreamCnt > MAX_STALLING_STREAMS ||
+            rawStreamCnt > MAX_RAW_STREAMS ||
+            processedStreamCnt > MAX_PROCESSED_STREAMS) {
+        ALOGE("%s: Invalid stream configu: stall: %d, raw: %d, processed %d",
+                __func__, stallStreamCnt, rawStreamCnt, processedStreamCnt);
+        pthread_mutex_unlock(&mMutex);
+        return -EINVAL;
+    }
+    /* Check whether we have zsl stream or 4k video case */
+    if (isZsl && m_bIsVideo) {
+        ALOGE("%s: Currently invalid configuration ZSL&Video!", __func__);
+        pthread_mutex_unlock(&mMutex);
+        return -EINVAL;
+    }
+    /* Check if stream sizes are sane */
+    if (numStreamsOnEncoder > 2) {
+        ALOGE("%s: Number of streams on ISP encoder path exceeds limits of 2",
+                __func__);
+        pthread_mutex_unlock(&mMutex);
+        return -EINVAL;
+    } else if (1 < numStreamsOnEncoder){
+        bUseCommonFeatureMask = true;
+        CDBG_HIGH("%s: Multiple streams above max viewfinder size, common mask needed",
+                __func__);
+    }
+
+    /* Check if BLOB size is greater than 4k in 4k recording case */
+    if (m_bIs4KVideo && bJpegExceeds4K) {
+        ALOGE("%s: HAL doesn't support Blob size greater than 4k in 4k recording",
+                __func__);
+        pthread_mutex_unlock(&mMutex);
+        return -EINVAL;
+    }
+
+    // When JPEG and preview streams share VFE output, CPP will not apply CAC2
+    // on JPEG stream. So disable such configurations to ensure CAC2 is applied.
+    // Don't fail for reprocess configurations. Also don't fail if bJpegExceeds4K
+    // is not true. Otherwise testMandatoryOutputCombinations will fail with following
+    // configurations:
+    //    {[PRIV, PREVIEW] [PRIV, RECORD] [JPEG, RECORD]}
+    //    {[PRIV, PREVIEW] [YUV, RECORD] [JPEG, RECORD]}
+    //    (These two configurations will not have CAC2 enabled even in HQ modes.)
+    if (!isZsl && bJpegOnEncoder && bJpegExceeds4K && bUseCommonFeatureMask) {
+        ALOGE("%s: Blob size greater than 4k and multiple streams are on encoder output",
+                __func__);
+        pthread_mutex_unlock(&mMutex);
+        return -EINVAL;
+    }
+
+    // If a jpeg stream is available, a YUV 888 stream is on the encoder path, and
+    // the YUV stream's size is larger than the JPEG size, set the common
+    // postprocess mask to NONE, so that we can take advantage of postproc bypass.
+    if (numYuv888OnEncoder && isOnEncoder(maxViewfinderSize,
+            jpegSize.width, jpegSize.height) &&
+            largeYuv888Size.width > jpegSize.width &&
+            largeYuv888Size.height > jpegSize.height) {
+        bYuv888OverrideJpeg = true;
+    } else if (!isJpeg && numStreamsOnEncoder > 1) {
+        commonFeatureMask = CAM_QCOM_FEATURE_PP_SUPERSET_HAL3;
+    }
+
+    rc = validateStreamDimensions(streamList);
+    if (rc == NO_ERROR) {
+        rc = validateStreamRotations(streamList);
+    }
+    if (rc != NO_ERROR) {
+        ALOGE("%s: Invalid stream configuration requested!", __func__);
+        pthread_mutex_unlock(&mMutex);
+        return rc;
+    }
+
+    camera3_stream_t *zslStream = NULL; //Only use this for size and not actual handle!
+    camera3_stream_t *jpegStream = NULL;
+    for (size_t i = 0; i < streamList->num_streams; i++) {
+        camera3_stream_t *newStream = streamList->streams[i];
+        CDBG_HIGH("%s: newStream type = %d, stream format = %d "
+                "stream size : %d x %d, stream rotation = %d",
+                __func__, newStream->stream_type, newStream->format,
+                newStream->width, newStream->height, newStream->rotation);
+        //if the stream is in the mStreamList validate it
+        bool stream_exists = false;
+        for (List<stream_info_t*>::iterator it=mStreamInfo.begin();
+                it != mStreamInfo.end(); it++) {
+            if ((*it)->stream == newStream) {
+                QCamera3ProcessingChannel *channel =
+                    (QCamera3ProcessingChannel*)(*it)->stream->priv;
+                stream_exists = true;
+                if (channel)
+                    delete channel;
+                (*it)->status = VALID;
+                (*it)->stream->priv = NULL;
+                (*it)->channel = NULL;
+            }
+        }
+        if (!stream_exists && newStream->stream_type != CAMERA3_STREAM_INPUT) {
+            //new stream
+            stream_info_t* stream_info;
+            stream_info = (stream_info_t* )malloc(sizeof(stream_info_t));
+            if (!stream_info) {
+               ALOGE("%s: Could not allocate stream info", __func__);
+               rc = -ENOMEM;
+               pthread_mutex_unlock(&mMutex);
+               return rc;
+            }
+            stream_info->stream = newStream;
+            stream_info->status = VALID;
+            stream_info->channel = NULL;
+            mStreamInfo.push_back(stream_info);
+        }
+        /* Covers Opaque ZSL and API1 F/W ZSL */
+        if (IS_USAGE_ZSL(newStream->usage)
+                || newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL ) {
+            if (zslStream != NULL) {
+                ALOGE("%s: Multiple input/reprocess streams requested!", __func__);
+                pthread_mutex_unlock(&mMutex);
+                return BAD_VALUE;
+            }
+            zslStream = newStream;
+        }
+        /* Covers YUV reprocess */
+        if (inputStream != NULL) {
+            if (newStream->stream_type == CAMERA3_STREAM_OUTPUT
+                    && newStream->format == HAL_PIXEL_FORMAT_YCbCr_420_888
+                    && inputStream->format == HAL_PIXEL_FORMAT_YCbCr_420_888
+                    && inputStream->width == newStream->width
+                    && inputStream->height == newStream->height) {
+                if (zslStream != NULL) {
+                    /* This scenario indicates that multiple YUV streams with the
+                     * same size as the input stream have been requested. Since the
+                     * zsl stream handle is used solely to override the size of
+                     * streams that share h/w streams, we just make a guess here as
+                     * to which stream is the ZSL stream. This will be refactored
+                     * once we have generic logic for streams sharing encoder output.
+                     */
+                    CDBG_HIGH("%s: Warning, Multiple ip/reprocess streams requested!", __func__);
+                }
+                zslStream = newStream;
+            }
+        }
+        if (newStream->format == HAL_PIXEL_FORMAT_BLOB) {
+            jpegStream = newStream;
+        }
+    }
+
+    /* If a zsl stream is set, we know that we have configured at least one input or
+       bidirectional stream */
+    if (NULL != zslStream) {
+        mInputStreamInfo.dim.width = (int32_t)zslStream->width;
+        mInputStreamInfo.dim.height = (int32_t)zslStream->height;
+        mInputStreamInfo.format = zslStream->format;
+        mInputStreamInfo.usage = zslStream->usage;
+        CDBG("%s: Input stream configured! %d x %d, format %d, usage %d",
+                __func__, mInputStreamInfo.dim.width,
+                mInputStreamInfo.dim.height,
+                mInputStreamInfo.format, mInputStreamInfo.usage);
+    }
+
+    cleanAndSortStreamInfo();
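+    /* Tear down the internal channels from the previous configuration; they are
+     * recreated as needed for the new stream set. */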
+    if (mMetadataChannel) {
+        delete mMetadataChannel;
+        mMetadataChannel = NULL;
+    }
+    if (mSupportChannel) {
+        delete mSupportChannel;
+        mSupportChannel = NULL;
+    }
+
+    if (mAnalysisChannel) {
+        delete mAnalysisChannel;
+        mAnalysisChannel = NULL;
+    }
+
+    if (mDummyBatchChannel) {
+        delete mDummyBatchChannel;
+        mDummyBatchChannel = NULL;
+    }
+
+    //Create metadata channel and initialize it
+    mMetadataChannel = new QCamera3MetadataChannel(mCameraHandle->camera_handle,
+                    mChannelHandle, mCameraHandle->ops, captureResultCb,
+                    &gCamCapability[mCameraId]->padding_info, CAM_QCOM_FEATURE_NONE, this);
+    if (mMetadataChannel == NULL) {
+        ALOGE("%s: failed to allocate metadata channel", __func__);
+        rc = -ENOMEM;
+        pthread_mutex_unlock(&mMutex);
+        return rc;
+    }
+    rc = mMetadataChannel->initialize(IS_TYPE_NONE);
+    if (rc < 0) {
+        ALOGE("%s: metadata channel initialization failed", __func__);
+        delete mMetadataChannel;
+        mMetadataChannel = NULL;
+        pthread_mutex_unlock(&mMutex);
+        return rc;
+    }
+
+    // Create analysis stream all the time, even when h/w support is not available
+    {
+        mAnalysisChannel = new QCamera3SupportChannel(
+                mCameraHandle->camera_handle,
+                mChannelHandle,
+                mCameraHandle->ops,
+                &gCamCapability[mCameraId]->padding_info,
+                CAM_QCOM_FEATURE_PP_SUPERSET_HAL3,
+                CAM_STREAM_TYPE_ANALYSIS,
+                &gCamCapability[mCameraId]->analysis_recommended_res,
+                gCamCapability[mCameraId]->analysis_recommended_format,
+                this,
+                0); // force buffer count to 0
+        if (!mAnalysisChannel) {
+            ALOGE("%s: H/W Analysis channel cannot be created", __func__);
+            pthread_mutex_unlock(&mMutex);
+            return -ENOMEM;
+        }
+    }
+
+    bool isRawStreamRequested = false;
+    memset(&mStreamConfigInfo, 0, sizeof(cam_stream_size_info_t));
+    /* Allocate channel objects for the requested streams */
+    for (size_t i = 0; i < streamList->num_streams; i++) {
+        camera3_stream_t *newStream = streamList->streams[i];
+        uint32_t stream_usage = newStream->usage;
+        mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].width = (int32_t)newStream->width;
+        mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].height = (int32_t)newStream->height;
+        if ((newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL
+                || IS_USAGE_ZSL(newStream->usage)) &&
+            newStream->format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED){
+            mStreamConfigInfo.type[mStreamConfigInfo.num_streams] = CAM_STREAM_TYPE_SNAPSHOT;
+            if (bUseCommonFeatureMask) {
+                mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] =
+                        commonFeatureMask;
+            } else {
+                mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] =
+                        CAM_QCOM_FEATURE_NONE;
+            }
+
+        } else if(newStream->stream_type == CAMERA3_STREAM_INPUT) {
+                CDBG_HIGH("%s: Input stream configured, reprocess config", __func__);
+        } else {
+            //for non zsl streams find out the format
+            switch (newStream->format) {
+            case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED :
+              {
+                 mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams]
+                         = CAM_QCOM_FEATURE_PP_SUPERSET_HAL3;
+
+                 if (stream_usage & private_handle_t::PRIV_FLAGS_VIDEO_ENCODER) {
+
+                     mStreamConfigInfo.type[mStreamConfigInfo.num_streams] = CAM_STREAM_TYPE_VIDEO;
+                     if (m_bTnrEnabled && m_bTnrVideo) {
+                         mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] |=
+                             CAM_QCOM_FEATURE_CPP_TNR;
+                     }
+
+                 } else {
+
+                     mStreamConfigInfo.type[mStreamConfigInfo.num_streams] = CAM_STREAM_TYPE_PREVIEW;
+                     if (m_bTnrEnabled && m_bTnrPreview) {
+                         mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] |=
+                             CAM_QCOM_FEATURE_CPP_TNR;
+                     }
+                 }
+
+                 if ((newStream->rotation == CAMERA3_STREAM_ROTATION_90) ||
+                         (newStream->rotation == CAMERA3_STREAM_ROTATION_270)) {
+                     mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].width =
+                             newStream->height;
+                     mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].height =
+                             newStream->width;
+                 }
+              }
+              break;
+           case HAL_PIXEL_FORMAT_YCbCr_420_888:
+              mStreamConfigInfo.type[mStreamConfigInfo.num_streams] = CAM_STREAM_TYPE_CALLBACK;
+              if (isOnEncoder(maxViewfinderSize, newStream->width,
+                      newStream->height)) {
+                  if (bUseCommonFeatureMask)
+                      mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] =
+                              commonFeatureMask;
+                  else
+                      mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] =
+                              CAM_QCOM_FEATURE_NONE;
+              } else {
+                  mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] =
+                          CAM_QCOM_FEATURE_PP_SUPERSET_HAL3;
+              }
+              break;
+           case HAL_PIXEL_FORMAT_BLOB:
+              mStreamConfigInfo.type[mStreamConfigInfo.num_streams] = CAM_STREAM_TYPE_SNAPSHOT;
+              if (m_bIs4KVideo && !isZsl) {
+                  mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams]
+                          = CAM_QCOM_FEATURE_PP_SUPERSET_HAL3;
+              } else {
+                  if (bUseCommonFeatureMask &&
+                          isOnEncoder(maxViewfinderSize, newStream->width,
+                                  newStream->height)) {
+                      mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] = commonFeatureMask;
+                  } else {
+                      mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] = CAM_QCOM_FEATURE_NONE;
+                  }
+              }
+              if (isZsl) {
+                  if (zslStream) {
+                      mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].width =
+                              (int32_t)zslStream->width;
+                      mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].height =
+                              (int32_t)zslStream->height;
+                  } else {
+                      ALOGE("%s: Error, No ZSL stream identified",__func__);
+                      pthread_mutex_unlock(&mMutex);
+                      return -EINVAL;
+                  }
+              } else if (m_bIs4KVideo) {
+                  mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].width =
+                          (int32_t)videoWidth;
+                  mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].height =
+                          (int32_t)videoHeight;
+              } else if (bYuv888OverrideJpeg) {
+                  mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].width =
+                          (int32_t)largeYuv888Size.width;
+                  mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].height =
+                          (int32_t)largeYuv888Size.height;
+              }
+              break;
+           case HAL_PIXEL_FORMAT_RAW_OPAQUE:
+           case HAL_PIXEL_FORMAT_RAW16:
+           case HAL_PIXEL_FORMAT_RAW10:
+              mStreamConfigInfo.type[mStreamConfigInfo.num_streams] = CAM_STREAM_TYPE_RAW;
+              isRawStreamRequested = true;
+              break;
+           default:
+              mStreamConfigInfo.type[mStreamConfigInfo.num_streams] = CAM_STREAM_TYPE_DEFAULT;
+              mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] = CAM_QCOM_FEATURE_NONE;
+              break;
+            }
+
+        }
+
+        if (newStream->priv == NULL) {
+            //New stream, construct channel
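+            // First set gralloc usage flags based on the stream direction, then
+            // create a channel matching the stream format below.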
+            switch (newStream->stream_type) {
+            case CAMERA3_STREAM_INPUT:
+                newStream->usage |= GRALLOC_USAGE_HW_CAMERA_READ;
+                newStream->usage |= GRALLOC_USAGE_HW_CAMERA_WRITE; //WR for in-place algos
+                break;
+            case CAMERA3_STREAM_BIDIRECTIONAL:
+                newStream->usage |= GRALLOC_USAGE_HW_CAMERA_READ |
+                    GRALLOC_USAGE_HW_CAMERA_WRITE;
+                break;
+            case CAMERA3_STREAM_OUTPUT:
+                /* For video encoding streams, set the read/write-rarely
+                 * flags so the buffers may be allocated un-cached */
+                if (newStream->usage & GRALLOC_USAGE_HW_VIDEO_ENCODER)
+                    newStream->usage |=
+                         (GRALLOC_USAGE_SW_READ_RARELY |
+                         GRALLOC_USAGE_SW_WRITE_RARELY |
+                         GRALLOC_USAGE_HW_CAMERA_WRITE);
+                else if (IS_USAGE_ZSL(newStream->usage))
+                    CDBG("%s: ZSL usage flag skipping", __func__);
+                else if (newStream == zslStream
+                        || newStream->format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
+                    newStream->usage |= GRALLOC_USAGE_HW_CAMERA_ZSL;
+                } else
+                    newStream->usage |= GRALLOC_USAGE_HW_CAMERA_WRITE;
+                break;
+            default:
+                ALOGE("%s: Invalid stream_type %d", __func__, newStream->stream_type);
+                break;
+            }
+
+            if (newStream->stream_type == CAMERA3_STREAM_OUTPUT ||
+                    newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL) {
+                QCamera3ProcessingChannel *channel = NULL;
+                switch (newStream->format) {
+                case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
+                    if ((newStream->usage &
+                            private_handle_t::PRIV_FLAGS_VIDEO_ENCODER) &&
+                            (streamList->operation_mode ==
+                            CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE)
+                    ) {
+                        channel = new QCamera3RegularChannel(mCameraHandle->camera_handle,
+                                mChannelHandle, mCameraHandle->ops, captureResultCb,
+                                &gCamCapability[mCameraId]->padding_info,
+                                this,
+                                newStream,
+                                (cam_stream_type_t)
+                                        mStreamConfigInfo.type[mStreamConfigInfo.num_streams],
+                                mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams],
+                                mMetadataChannel,
+                                0); //heap buffers are not required for HFR video channel
+                        if (channel == NULL) {
+                            ALOGE("%s: allocation of channel failed", __func__);
+                            pthread_mutex_unlock(&mMutex);
+                            return -ENOMEM;
+                        }
+                        //channel->getNumBuffers() will return 0 here, so use
+                        //MAX_INFLIGHT_HFR_REQUESTS
+                        newStream->max_buffers = MAX_INFLIGHT_HFR_REQUESTS;
+                        newStream->priv = channel;
+                        ALOGI("%s: num video buffers in HFR mode: %d",
+                                __func__, MAX_INFLIGHT_HFR_REQUESTS);
+                    } else {
+                        /* In the HFR preview-only case, copy the stream contents to
+                         * create a dummy batch channel so that sensor streaming
+                         * stays in HFR mode */
+                        if (!m_bIsVideo && (streamList->operation_mode ==
+                                CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE)) {
+                            mDummyBatchStream = *newStream;
+                        }
+                        channel = new QCamera3RegularChannel(mCameraHandle->camera_handle,
+                                mChannelHandle, mCameraHandle->ops, captureResultCb,
+                                &gCamCapability[mCameraId]->padding_info,
+                                this,
+                                newStream,
+                                (cam_stream_type_t)
+                                        mStreamConfigInfo.type[mStreamConfigInfo.num_streams],
+                                mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams],
+                                mMetadataChannel,
+                                MAX_INFLIGHT_REQUESTS);
+                        if (channel == NULL) {
+                            ALOGE("%s: allocation of channel failed", __func__);
+                            pthread_mutex_unlock(&mMutex);
+                            return -ENOMEM;
+                        }
+                        newStream->max_buffers = channel->getNumBuffers();
+                        newStream->priv = channel;
+                    }
+                    break;
+                case HAL_PIXEL_FORMAT_YCbCr_420_888: {
+                    channel = new QCamera3YUVChannel(mCameraHandle->camera_handle,
+                            mChannelHandle,
+                            mCameraHandle->ops, captureResultCb,
+                            &gCamCapability[mCameraId]->padding_info,
+                            this,
+                            newStream,
+                            (cam_stream_type_t)
+                                    mStreamConfigInfo.type[mStreamConfigInfo.num_streams],
+                            mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams],
+                            mMetadataChannel);
+                    if (channel == NULL) {
+                        ALOGE("%s: allocation of YUV channel failed", __func__);
+                        pthread_mutex_unlock(&mMutex);
+                        return -ENOMEM;
+                    }
+                    newStream->max_buffers = channel->getNumBuffers();
+                    newStream->priv = channel;
+                    break;
+                }
+                case HAL_PIXEL_FORMAT_RAW_OPAQUE:
+                case HAL_PIXEL_FORMAT_RAW16:
+                case HAL_PIXEL_FORMAT_RAW10:
+                    mRawChannel = new QCamera3RawChannel(
+                            mCameraHandle->camera_handle, mChannelHandle,
+                            mCameraHandle->ops, captureResultCb,
+                            &gCamCapability[mCameraId]->padding_info,
+                            this, newStream, CAM_QCOM_FEATURE_NONE,
+                            mMetadataChannel,
+                            (newStream->format == HAL_PIXEL_FORMAT_RAW16));
+                    if (mRawChannel == NULL) {
+                        ALOGE("%s: allocation of raw channel failed", __func__);
+                        pthread_mutex_unlock(&mMutex);
+                        return -ENOMEM;
+                    }
+                    newStream->max_buffers = mRawChannel->getNumBuffers();
+                    newStream->priv = (QCamera3ProcessingChannel*)mRawChannel;
+                    break;
+                case HAL_PIXEL_FORMAT_BLOB:
+                    // Limit live snapshot to a single in-flight buffer to
+                    // mitigate frame drops during video snapshot; the more
+                    // buffers allocated, the more frames are dropped.
+                    mPictureChannel = new QCamera3PicChannel(
+                            mCameraHandle->camera_handle, mChannelHandle,
+                            mCameraHandle->ops, captureResultCb,
+                            &gCamCapability[mCameraId]->padding_info, this, newStream,
+                            mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams],
+                            m_bIs4KVideo, isZsl, mMetadataChannel,
+                            (m_bIsVideo ? 1 : MAX_INFLIGHT_BLOB));
+                    if (mPictureChannel == NULL) {
+                        ALOGE("%s: allocation of channel failed", __func__);
+                        pthread_mutex_unlock(&mMutex);
+                        return -ENOMEM;
+                    }
+                    newStream->priv = (QCamera3ProcessingChannel*)mPictureChannel;
+                    newStream->max_buffers = mPictureChannel->getNumBuffers();
+                    mPictureChannel->overrideYuvSize(
+                            mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].width,
+                            mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].height);
+                    break;
+
+                default:
+                    ALOGE("%s: not a supported format 0x%x", __func__, newStream->format);
+                    break;
+                }
+            } else if (newStream->stream_type == CAMERA3_STREAM_INPUT) {
+                newStream->max_buffers = MAX_INFLIGHT_REPROCESS_REQUESTS;
+            } else {
+                ALOGE("%s: Error: unknown stream type", __func__);
+                pthread_mutex_unlock(&mMutex);
+                return -EINVAL;
+            }
+
+            for (List<stream_info_t*>::iterator it=mStreamInfo.begin();
+                    it != mStreamInfo.end(); it++) {
+                if ((*it)->stream == newStream) {
+                    (*it)->channel = (QCamera3ProcessingChannel*) newStream->priv;
+                    break;
+                }
+            }
+        } else {
+            // Channel already exists for this stream
+            // Do nothing for now
+        }
+
+        /* Do not add entries for the input stream in meta stream info
+         * since there is no real stream associated with it
+         */
+        if (newStream->stream_type != CAMERA3_STREAM_INPUT)
+            mStreamConfigInfo.num_streams++;
+    }
+
+    //RAW DUMP channel
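+    // Created only when raw dumping is enabled and the framework did not ask
+    // for a RAW stream itself; uses the sensor's maximum RAW dimension.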
+    if (mEnableRawDump && isRawStreamRequested == false){
+        cam_dimension_t rawDumpSize;
+        rawDumpSize = getMaxRawSize(mCameraId);
+        mRawDumpChannel = new QCamera3RawDumpChannel(mCameraHandle->camera_handle,
+                                  mChannelHandle,
+                                  mCameraHandle->ops,
+                                  rawDumpSize,
+                                  &gCamCapability[mCameraId]->padding_info,
+                                  this, CAM_QCOM_FEATURE_NONE);
+        if (!mRawDumpChannel) {
+            ALOGE("%s: Raw Dump channel cannot be created", __func__);
+            pthread_mutex_unlock(&mMutex);
+            return -ENOMEM;
+        }
+    }
+
+
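+    // Append HAL-internal streams (analysis, callback/support, RAW dump and
+    // the dummy batch stream) to mStreamConfigInfo so they are included in the
+    // stream info sent to the backend.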
+    if (mAnalysisChannel) {
+        mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams] =
+                gCamCapability[mCameraId]->analysis_recommended_res;
+        mStreamConfigInfo.type[mStreamConfigInfo.num_streams] =
+                CAM_STREAM_TYPE_ANALYSIS;
+        mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] =
+                CAM_QCOM_FEATURE_PP_SUPERSET_HAL3;
+        mStreamConfigInfo.num_streams++;
+    }
+
+    if (isSupportChannelNeeded(streamList, mStreamConfigInfo)) {
+        mSupportChannel = new QCamera3SupportChannel(
+                mCameraHandle->camera_handle,
+                mChannelHandle,
+                mCameraHandle->ops,
+                &gCamCapability[mCameraId]->padding_info,
+                CAM_QCOM_FEATURE_PP_SUPERSET_HAL3,
+                CAM_STREAM_TYPE_CALLBACK,
+                &QCamera3SupportChannel::kDim,
+                CAM_FORMAT_YUV_420_NV21,
+                this);
+        if (!mSupportChannel) {
+            ALOGE("%s: dummy channel cannot be created", __func__);
+            pthread_mutex_unlock(&mMutex);
+            return -ENOMEM;
+        }
+    }
+
+    if (mSupportChannel) {
+        mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams] =
+                QCamera3SupportChannel::kDim;
+        mStreamConfigInfo.type[mStreamConfigInfo.num_streams] =
+                CAM_STREAM_TYPE_CALLBACK;
+        mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] =
+                CAM_QCOM_FEATURE_PP_SUPERSET_HAL3;
+        mStreamConfigInfo.num_streams++;
+    }
+
+    if (mRawDumpChannel) {
+        cam_dimension_t rawSize;
+        rawSize = getMaxRawSize(mCameraId);
+        mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams] =
+                rawSize;
+        mStreamConfigInfo.type[mStreamConfigInfo.num_streams] =
+                CAM_STREAM_TYPE_RAW;
+        mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] =
+                CAM_QCOM_FEATURE_NONE;
+        mStreamConfigInfo.num_streams++;
+    }
+    /* In HFR mode, if video stream is not added, create a dummy channel so that
+     * ISP can create a batch mode even for preview only case. This channel is
+     * never 'start'ed (no stream-on), it is only 'initialized'  */
+    if ((mOpMode == CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE) &&
+            !m_bIsVideo) {
+        mDummyBatchChannel = new QCamera3RegularChannel(mCameraHandle->camera_handle,
+                mChannelHandle,
+                mCameraHandle->ops, captureResultCb,
+                &gCamCapability[mCameraId]->padding_info,
+                this,
+                &mDummyBatchStream,
+                CAM_STREAM_TYPE_VIDEO,
+                CAM_QCOM_FEATURE_PP_SUPERSET_HAL3,
+                mMetadataChannel);
+        if (NULL == mDummyBatchChannel) {
+            ALOGE("%s: creation of mDummyBatchChannel failed."
+                    "Preview will use non-hfr sensor mode ", __func__);
+        }
+    }
+    if (mDummyBatchChannel) {
+        mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].width =
+                mDummyBatchStream.width;
+        mStreamConfigInfo.stream_sizes[mStreamConfigInfo.num_streams].height =
+                mDummyBatchStream.height;
+        mStreamConfigInfo.type[mStreamConfigInfo.num_streams] =
+                CAM_STREAM_TYPE_VIDEO;
+        mStreamConfigInfo.postprocess_mask[mStreamConfigInfo.num_streams] =
+                CAM_QCOM_FEATURE_PP_SUPERSET_HAL3;
+        mStreamConfigInfo.num_streams++;
+    }
+
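+    // Overall in-flight buffer budget reported to the backend for this
+    // configuration.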
+    mStreamConfigInfo.buffer_info.min_buffers = MIN_INFLIGHT_REQUESTS;
+    mStreamConfigInfo.buffer_info.max_buffers =
+            m_bIs4KVideo ? 0 : MAX_INFLIGHT_REQUESTS;
+
+    /* Initialize mPendingRequestsList and mPendingBuffersMap */
+    for (pendingRequestIterator i = mPendingRequestsList.begin();
+            i != mPendingRequestsList.end();) {
+        i = erasePendingRequest(i);
+    }
+    mPendingFrameDropList.clear();
+    // Initialize/Reset the pending buffers list
+    mPendingBuffersMap.num_buffers = 0;
+    mPendingBuffersMap.mPendingBufferList.clear();
+    mPendingReprocessResultList.clear();
+
+    mFirstRequest = true;
+    mCurJpegMeta.clear();
+    //Get min frame duration for this streams configuration
+    deriveMinFrameDuration();
+
+    /* Turn on video hint only if video stream is configured */
+
+    pthread_mutex_unlock(&mMutex);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : validateCaptureRequest
+ *
+ * DESCRIPTION: validate a capture request from camera service
+ *
+ * PARAMETERS :
+ *   @request : request from framework to process
+ *
+ * RETURN     :
+ *
+ *==========================================================================*/
+int QCamera3HardwareInterface::validateCaptureRequest(
+                    camera3_capture_request_t *request)
+{
+    ssize_t idx = 0;
+    const camera3_stream_buffer_t *b;
+    CameraMetadata meta;
+
+    /* Sanity check the request */
+    if (request == NULL) {
+        ALOGE("%s: NULL capture request", __func__);
+        return BAD_VALUE;
+    }
+
+    if (request->settings == NULL && mFirstRequest) {
+        /*settings cannot be null for the first request*/
+        return BAD_VALUE;
+    }
+
+    uint32_t frameNumber = request->frame_number;
+    if (request->num_output_buffers < 1 || request->output_buffers == NULL) {
+        ALOGE("%s: Request %d: No output buffers provided!",
+                __FUNCTION__, frameNumber);
+        return BAD_VALUE;
+    }
+    if (request->num_output_buffers >= MAX_NUM_STREAMS) {
+        ALOGE("%s: Number of buffers %d equals or is greater than maximum number of streams!",
+                __func__, request->num_output_buffers, MAX_NUM_STREAMS);
+        return BAD_VALUE;
+    }
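+    // If an input buffer is present it must be in a usable state: status OK,
+    // no release fence to wait on, and a non-NULL buffer handle.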
+    if (request->input_buffer != NULL) {
+        b = request->input_buffer;
+        if (b->status != CAMERA3_BUFFER_STATUS_OK) {
+            ALOGE("%s: Request %d: Buffer %ld: Status not OK!",
+                    __func__, frameNumber, (long)idx);
+            return BAD_VALUE;
+        }
+        if (b->release_fence != -1) {
+            ALOGE("%s: Request %d: Buffer %ld: Has a release fence!",
+                    __func__, frameNumber, (long)idx);
+            return BAD_VALUE;
+        }
+        if (b->buffer == NULL) {
+            ALOGE("%s: Request %d: Buffer %ld: NULL buffer handle!",
+                    __func__, frameNumber, (long)idx);
+            return BAD_VALUE;
+        }
+    }
+
+    // Validate all buffers
+    b = request->output_buffers;
+    do {
+        QCamera3ProcessingChannel *channel =
+                static_cast<QCamera3ProcessingChannel*>(b->stream->priv);
+        if (channel == NULL) {
+            ALOGE("%s: Request %d: Buffer %ld: Unconfigured stream!",
+                    __func__, frameNumber, (long)idx);
+            return BAD_VALUE;
+        }
+        if (b->status != CAMERA3_BUFFER_STATUS_OK) {
+            ALOGE("%s: Request %d: Buffer %ld: Status not OK!",
+                    __func__, frameNumber, (long)idx);
+            return BAD_VALUE;
+        }
+        if (b->release_fence != -1) {
+            ALOGE("%s: Request %d: Buffer %ld: Has a release fence!",
+                    __func__, frameNumber, (long)idx);
+            return BAD_VALUE;
+        }
+        if (b->buffer == NULL) {
+            ALOGE("%s: Request %d: Buffer %ld: NULL buffer handle!",
+                    __func__, frameNumber, (long)idx);
+            return BAD_VALUE;
+        }
+        if (*(b->buffer) == NULL) {
+            ALOGE("%s: Request %d: Buffer %ld: NULL private handle!",
+                    __func__, frameNumber, (long)idx);
+            return BAD_VALUE;
+        }
+        idx++;
+        b = request->output_buffers + idx;
+    } while (idx < (ssize_t)request->num_output_buffers);
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : deriveMinFrameDuration
+ *
+ * DESCRIPTION: derive minimum processed, jpeg, and raw frame durations based
+ *              on currently configured streams.
+ *
+ * PARAMETERS : NONE
+ *
+ * RETURN     : NONE
+ *
+ *==========================================================================*/
+void QCamera3HardwareInterface::deriveMinFrameDuration()
+{
+    int32_t maxJpegDim, maxProcessedDim, maxRawDim;
+
+    maxJpegDim = 0;
+    maxProcessedDim = 0;
+    maxRawDim = 0;
+
+    // Figure out maximum jpeg, processed, and raw dimensions
+    for (List<stream_info_t*>::iterator it = mStreamInfo.begin();
+        it != mStreamInfo.end(); it++) {
+
+        // Input stream doesn't have valid stream_type
+        if ((*it)->stream->stream_type == CAMERA3_STREAM_INPUT)
+            continue;
+
+        int32_t dimension = (int32_t)((*it)->stream->width * (*it)->stream->height);
+        if ((*it)->stream->format == HAL_PIXEL_FORMAT_BLOB) {
+            if (dimension > maxJpegDim)
+                maxJpegDim = dimension;
+        } else if ((*it)->stream->format == HAL_PIXEL_FORMAT_RAW_OPAQUE ||
+                (*it)->stream->format == HAL_PIXEL_FORMAT_RAW10 ||
+                (*it)->stream->format == HAL_PIXEL_FORMAT_RAW16) {
+            if (dimension > maxRawDim)
+                maxRawDim = dimension;
+        } else {
+            if (dimension > maxProcessedDim)
+                maxProcessedDim = dimension;
+        }
+    }
+
+    size_t count = MIN(gCamCapability[mCameraId]->supported_raw_dim_cnt,
+            MAX_SIZES_CNT);
+
+    //Assume all jpeg dimensions are in processed dimensions.
+    if (maxJpegDim > maxProcessedDim)
+        maxProcessedDim = maxJpegDim;
+    //Find the smallest raw dimension that is greater or equal to jpeg dimension
+    if (maxProcessedDim > maxRawDim) {
+        maxRawDim = INT32_MAX;
+
+        for (size_t i = 0; i < count; i++) {
+            int32_t dimension = gCamCapability[mCameraId]->raw_dim[i].width *
+                    gCamCapability[mCameraId]->raw_dim[i].height;
+            if (dimension >= maxProcessedDim && dimension < maxRawDim)
+                maxRawDim = dimension;
+        }
+    }
+
+    //Find minimum durations for processed, jpeg, and raw
+    for (size_t i = 0; i < count; i++) {
+        if (maxRawDim == gCamCapability[mCameraId]->raw_dim[i].width *
+                gCamCapability[mCameraId]->raw_dim[i].height) {
+            mMinRawFrameDuration = gCamCapability[mCameraId]->raw_min_duration[i];
+            break;
+        }
+    }
+    count = MIN(gCamCapability[mCameraId]->picture_sizes_tbl_cnt, MAX_SIZES_CNT);
+    for (size_t i = 0; i < count; i++) {
+        if (maxProcessedDim ==
+                gCamCapability[mCameraId]->picture_sizes_tbl[i].width *
+                gCamCapability[mCameraId]->picture_sizes_tbl[i].height) {
+            mMinProcessedFrameDuration = gCamCapability[mCameraId]->picture_min_duration[i];
+            mMinJpegFrameDuration = gCamCapability[mCameraId]->picture_min_duration[i];
+            break;
+        }
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getMinFrameDuration
+ *
+ * DESCRIPTION: get minimum frame duration based on the durations derived for
+ *              the currently configured streams and this request's streams.
+ *
+ * PARAMETERS : @request: request sent by the frameworks
+ *
+ * RETURN     : min frame duration for a particular request
+ *
+ *==========================================================================*/
+int64_t QCamera3HardwareInterface::getMinFrameDuration(const camera3_capture_request_t *request)
+{
+    bool hasJpegStream = false;
+    bool hasRawStream = false;
+    for (uint32_t i = 0; i < request->num_output_buffers; i ++) {
+        const camera3_stream_t *stream = request->output_buffers[i].stream;
+        if (stream->format == HAL_PIXEL_FORMAT_BLOB)
+            hasJpegStream = true;
+        else if (stream->format == HAL_PIXEL_FORMAT_RAW_OPAQUE ||
+                stream->format == HAL_PIXEL_FORMAT_RAW10 ||
+                stream->format == HAL_PIXEL_FORMAT_RAW16)
+            hasRawStream = true;
+    }
+
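+    // The request's minimum duration is the larger of the RAW and processed
+    // minimums; if a BLOB (JPEG) stream is requested it is additionally
+    // bounded by the JPEG minimum.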
+    if (!hasJpegStream)
+        return MAX(mMinRawFrameDuration, mMinProcessedFrameDuration);
+    else
+        return MAX(MAX(mMinRawFrameDuration, mMinProcessedFrameDuration), mMinJpegFrameDuration);
+}
+
+/*===========================================================================
+ * FUNCTION   : handlePendingReprocResults
+ *
+ * DESCRIPTION: check and notify on any pending reprocess results
+ *
+ * PARAMETERS :
+ *   @frame_number   : Pending request frame number
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCamera3HardwareInterface::handlePendingReprocResults(uint32_t frame_number)
+{
+    for (List<PendingReprocessResult>::iterator j = mPendingReprocessResultList.begin();
+            j != mPendingReprocessResultList.end(); j++) {
+        if (j->frame_number == frame_number) {
+            mCallbackOps->notify(mCallbackOps, &j->notify_msg);
+
+            CDBG("%s: Delayed reprocess notify %d", __func__,
+                    frame_number);
+
+            for (pendingRequestIterator k = mPendingRequestsList.begin();
+                    k != mPendingRequestsList.end(); k++) {
+
+                if (k->frame_number == j->frame_number) {
+                    CDBG("%s: Found reprocess frame number %d in pending reprocess List "
+                            "Take it out!!", __func__,
+                            k->frame_number);
+
+                    camera3_capture_result result;
+                    memset(&result, 0, sizeof(camera3_capture_result));
+                    result.frame_number = frame_number;
+                    result.num_output_buffers = 1;
+                    result.output_buffers =  &j->buffer;
+                    result.input_buffer = k->input_buffer;
+                    result.result = k->settings;
+                    result.partial_result = PARTIAL_RESULT_COUNT;
+                    mCallbackOps->process_capture_result(mCallbackOps, &result);
+
+                    erasePendingRequest(k);
+                    break;
+                }
+            }
+            mPendingReprocessResultList.erase(j);
+            break;
+        }
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : handleBatchMetadata
+ *
+ * DESCRIPTION: Handles metadata buffer callback in batch mode
+ *
+ * PARAMETERS : @metadata_buf: metadata buffer
+ *              @free_and_bufdone_meta_buf: Buf done on the meta buf and free
+ *                 the meta buf in this method
+ *
+ * RETURN     :
+ *
+ *==========================================================================*/
+void QCamera3HardwareInterface::handleBatchMetadata(
+        mm_camera_super_buf_t *metadata_buf, bool free_and_bufdone_meta_buf)
+{
+    ATRACE_CALL();
+
+    if (NULL == metadata_buf) {
+        ALOGE("%s: metadata_buf is NULL", __func__);
+        return;
+    }
+    /* In batch mode, the metadata will contain the frame number and timestamp
+     * of the last frame in the batch. E.g. a batch containing buffers from
+     * requests 5, 6, 7 and 8 will have the frame number and timestamp of 8.
+     * multiple process_capture_requests => 1 set_param => 1 handleBatchMetadata
+     * => multiple process_capture_results */
+    metadata_buffer_t *metadata =
+            (metadata_buffer_t *)metadata_buf->bufs[0]->buffer;
+    int32_t frame_number_valid = 0, urgent_frame_number_valid = 0;
+    uint32_t last_frame_number = 0, last_urgent_frame_number = 0;
+    uint32_t first_frame_number = 0, first_urgent_frame_number = 0;
+    uint32_t frame_number = 0, urgent_frame_number = 0;
+    int64_t last_frame_capture_time = 0, first_frame_capture_time, capture_time;
+    bool invalid_metadata = false;
+    size_t urgentFrameNumDiff = 0, frameNumDiff = 0;
+    size_t loopCount = 1;
+
+    int32_t *p_frame_number_valid =
+            POINTER_OF_META(CAM_INTF_META_FRAME_NUMBER_VALID, metadata);
+    uint32_t *p_frame_number =
+            POINTER_OF_META(CAM_INTF_META_FRAME_NUMBER, metadata);
+    int64_t *p_capture_time =
+            POINTER_OF_META(CAM_INTF_META_SENSOR_TIMESTAMP, metadata);
+    int32_t *p_urgent_frame_number_valid =
+            POINTER_OF_META(CAM_INTF_META_URGENT_FRAME_NUMBER_VALID, metadata);
+    uint32_t *p_urgent_frame_number =
+            POINTER_OF_META(CAM_INTF_META_URGENT_FRAME_NUMBER, metadata);
+
+    if ((NULL == p_frame_number_valid) || (NULL == p_frame_number) ||
+            (NULL == p_capture_time) || (NULL == p_urgent_frame_number_valid) ||
+            (NULL == p_urgent_frame_number)) {
+        ALOGE("%s: Invalid metadata", __func__);
+        invalid_metadata = true;
+    } else {
+        frame_number_valid = *p_frame_number_valid;
+        last_frame_number = *p_frame_number;
+        last_frame_capture_time = *p_capture_time;
+        urgent_frame_number_valid = *p_urgent_frame_number_valid;
+        last_urgent_frame_number = *p_urgent_frame_number;
+    }
+
+    /* In batchmode, when no video buffers are requested, set_parms are sent
+     * for every capture_request. The difference between consecutive urgent
+     * frame numbers and frame numbers should be used to interpolate the
+     * corresponding frame numbers and time stamps */
+    pthread_mutex_lock(&mMutex);
+    if (urgent_frame_number_valid) {
+        first_urgent_frame_number =
+                mPendingBatchMap.valueFor(last_urgent_frame_number);
+        urgentFrameNumDiff = last_urgent_frame_number + 1 -
+                first_urgent_frame_number;
+
+        CDBG("%s: urgent_frm: valid: %d frm_num: %d - %d",
+                __func__, urgent_frame_number_valid,
+                first_urgent_frame_number, last_urgent_frame_number);
+    }
+
+    if (frame_number_valid) {
+        first_frame_number = mPendingBatchMap.valueFor(last_frame_number);
+        frameNumDiff = last_frame_number + 1 -
+                first_frame_number;
+        mPendingBatchMap.removeItem(last_frame_number);
+
+        CDBG("%s:        frm: valid: %d frm_num: %d - %d",
+                __func__, frame_number_valid,
+                first_frame_number, last_frame_number);
+
+    }
+    pthread_mutex_unlock(&mMutex);
+
+    if (urgent_frame_number_valid || frame_number_valid) {
+        loopCount = MAX(urgentFrameNumDiff, frameNumDiff);
+        if (urgentFrameNumDiff > MAX_HFR_BATCH_SIZE)
+            ALOGE("%s: urgentFrameNumDiff: %zu urgentFrameNum: %u",
+                    __func__, urgentFrameNumDiff, last_urgent_frame_number);
+        if (frameNumDiff > MAX_HFR_BATCH_SIZE)
+            ALOGE("%s: frameNumDiff: %zu frameNum: %u",
+                    __func__, frameNumDiff, last_frame_number);
+    }
+
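+    /* Replay the batch: for each covered frame, patch the metadata with the
+     * interpolated (urgent) frame number and timestamp, then pass the same
+     * metadata buffer to handleMetadataWithLock() */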
+    for (size_t i = 0; i < loopCount; i++) {
+        /* handleMetadataWithLock is called even for invalid_metadata for
+         * pipeline depth calculation */
+        if (!invalid_metadata) {
+            /* Infer frame number. Batch metadata contains frame number of the
+             * last frame */
+            if (urgent_frame_number_valid) {
+                if (i < urgentFrameNumDiff) {
+                    urgent_frame_number =
+                            first_urgent_frame_number + i;
+                    CDBG("%s: inferred urgent frame_number: %d",
+                            __func__, urgent_frame_number);
+                    ADD_SET_PARAM_ENTRY_TO_BATCH(metadata,
+                            CAM_INTF_META_URGENT_FRAME_NUMBER, urgent_frame_number);
+                } else {
+                    /* This is to handle when urgentFrameNumDiff < frameNumDiff */
+                    ADD_SET_PARAM_ENTRY_TO_BATCH(metadata,
+                            CAM_INTF_META_URGENT_FRAME_NUMBER_VALID, 0);
+                }
+            }
+
+            /* Infer frame number. Batch metadata contains frame number of the
+             * last frame */
+            if (frame_number_valid) {
+                if (i < frameNumDiff) {
+                    frame_number = first_frame_number + i;
+                    CDBG("%s: inferred frame_number: %d", __func__, frame_number);
+                    ADD_SET_PARAM_ENTRY_TO_BATCH(metadata,
+                            CAM_INTF_META_FRAME_NUMBER, frame_number);
+                } else {
+                    /* This is to handle when urgentFrameNumDiff > frameNumDiff */
+                    ADD_SET_PARAM_ENTRY_TO_BATCH(metadata,
+                             CAM_INTF_META_FRAME_NUMBER_VALID, 0);
+                }
+            }
+
+            if (last_frame_capture_time) {
+                //Infer timestamp
+                first_frame_capture_time = last_frame_capture_time -
+                        (((loopCount - 1) * NSEC_PER_SEC) / mHFRVideoFps);
+                capture_time =
+                        first_frame_capture_time + (i * NSEC_PER_SEC / mHFRVideoFps);
+                ADD_SET_PARAM_ENTRY_TO_BATCH(metadata,
+                        CAM_INTF_META_SENSOR_TIMESTAMP, capture_time);
+                CDBG("%s: batch capture_time: %lld, capture_time: %lld",
+                        __func__, last_frame_capture_time, capture_time);
+            }
+        }
+        pthread_mutex_lock(&mMutex);
+        handleMetadataWithLock(metadata_buf,
+                false /* free_and_bufdone_meta_buf */);
+        pthread_mutex_unlock(&mMutex);
+    }
+
+done_batch_metadata:
+    /* BufDone metadata buffer */
+    if (free_and_bufdone_meta_buf) {
+        mMetadataChannel->bufDone(metadata_buf);
+        free(metadata_buf);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : handleMetadataWithLock
+ *
+ * DESCRIPTION: Handles metadata buffer callback with mMutex lock held.
+ *
+ * PARAMETERS : @metadata_buf: metadata buffer
+ *              @free_and_bufdone_meta_buf: Buf done on the meta buf and free
+ *                 the meta buf in this method
+ *
+ * RETURN     :
+ *
+ *==========================================================================*/
+void QCamera3HardwareInterface::handleMetadataWithLock(
+    mm_camera_super_buf_t *metadata_buf, bool free_and_bufdone_meta_buf)
+{
+    ATRACE_CALL();
+
+    metadata_buffer_t *metadata = (metadata_buffer_t *)metadata_buf->bufs[0]->buffer;
+    int32_t frame_number_valid, urgent_frame_number_valid;
+    uint32_t frame_number, urgent_frame_number;
+    int64_t capture_time;
+
+    int32_t *p_frame_number_valid =
+            POINTER_OF_META(CAM_INTF_META_FRAME_NUMBER_VALID, metadata);
+    uint32_t *p_frame_number = POINTER_OF_META(CAM_INTF_META_FRAME_NUMBER, metadata);
+    int64_t *p_capture_time = POINTER_OF_META(CAM_INTF_META_SENSOR_TIMESTAMP, metadata);
+    int32_t *p_urgent_frame_number_valid =
+            POINTER_OF_META(CAM_INTF_META_URGENT_FRAME_NUMBER_VALID, metadata);
+    uint32_t *p_urgent_frame_number =
+            POINTER_OF_META(CAM_INTF_META_URGENT_FRAME_NUMBER, metadata);
+    IF_META_AVAILABLE(cam_frame_dropped_t, p_cam_frame_drop, CAM_INTF_META_FRAME_DROPPED,
+            metadata) {
+        CDBG("%s: Dropped frame info for frame_number_valid %d, frame_number %d",
+                __func__, *p_frame_number_valid, *p_frame_number);
+    }
+
+    if ((NULL == p_frame_number_valid) || (NULL == p_frame_number) || (NULL == p_capture_time) ||
+            (NULL == p_urgent_frame_number_valid) || (NULL == p_urgent_frame_number)) {
+        ALOGE("%s: Invalid metadata", __func__);
+        if (free_and_bufdone_meta_buf) {
+            mMetadataChannel->bufDone(metadata_buf);
+            free(metadata_buf);
+        }
+        goto done_metadata;
+    } else {
+        frame_number_valid = *p_frame_number_valid;
+        frame_number = *p_frame_number;
+        capture_time = *p_capture_time;
+        urgent_frame_number_valid = *p_urgent_frame_number_valid;
+        urgent_frame_number = *p_urgent_frame_number;
+    }
+    //Partial result on process_capture_result for timestamp
+    if (urgent_frame_number_valid) {
+        CDBG("%s: valid urgent frame_number = %u, capture_time = %lld",
+          __func__, urgent_frame_number, capture_time);
+
+        //Received an urgent frame number, handle it
+        //using partial results
+        for (pendingRequestIterator i =
+                mPendingRequestsList.begin(); i != mPendingRequestsList.end(); i++) {
+            CDBG("%s: Iterator Frame = %d urgent frame = %d",
+                __func__, i->frame_number, urgent_frame_number);
+
+            if ((!i->input_buffer) && (i->frame_number < urgent_frame_number) &&
+                (i->partial_result_cnt == 0)) {
+                ALOGE("%s: Error: HAL missed urgent metadata for frame number %d",
+                    __func__, i->frame_number);
+            }
+
+            if (i->frame_number == urgent_frame_number &&
+                     i->bUrgentReceived == 0) {
+
+                camera3_capture_result_t result;
+                memset(&result, 0, sizeof(camera3_capture_result_t));
+
+                i->partial_result_cnt++;
+                i->bUrgentReceived = 1;
+                // Extract 3A metadata
+                result.result =
+                    translateCbUrgentMetadataToResultMetadata(metadata);
+                // Populate metadata result
+                result.frame_number = urgent_frame_number;
+                result.num_output_buffers = 0;
+                result.output_buffers = NULL;
+                result.partial_result = i->partial_result_cnt;
+
+                mCallbackOps->process_capture_result(mCallbackOps, &result);
+                CDBG("%s: urgent frame_number = %u, capture_time = %lld",
+                     __func__, result.frame_number, capture_time);
+                free_camera_metadata((camera_metadata_t *)result.result);
+                break;
+            }
+        }
+    }
+
+    if (!frame_number_valid) {
+        CDBG("%s: Not a valid normal frame number, used as SOF only", __func__);
+        if (free_and_bufdone_meta_buf) {
+            mMetadataChannel->bufDone(metadata_buf);
+            free(metadata_buf);
+        }
+        goto done_metadata;
+    }
+    CDBG_HIGH("%s: valid frame_number = %u, capture_time = %lld", __func__,
+            frame_number, capture_time);
+
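+    // Walk pending requests up to this frame number: report any dropped
+    // buffers, send the shutter notify, translate the HAL metadata and return
+    // whatever buffers have already been cached for each request.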
+    for (pendingRequestIterator i = mPendingRequestsList.begin();
+            i != mPendingRequestsList.end() && i->frame_number <= frame_number;) {
+        // Flush out all entries with less or equal frame numbers.
+
+        camera3_capture_result_t result;
+        memset(&result, 0, sizeof(camera3_capture_result_t));
+
+        CDBG("%s: frame_number in the list is %u", __func__, i->frame_number);
+        i->partial_result_cnt++;
+        result.partial_result = i->partial_result_cnt;
+
+        // Check whether any stream buffer corresponding to this is dropped or not
+        // If dropped, then send the ERROR_BUFFER for the corresponding stream
+        // The API does not expect a blob buffer to be dropped
+        if (p_cam_frame_drop && p_cam_frame_drop->frame_dropped) {
+            /* Clear notify_msg structure */
+            camera3_notify_msg_t notify_msg;
+            memset(&notify_msg, 0, sizeof(camera3_notify_msg_t));
+            for (List<RequestedBufferInfo>::iterator j = i->buffers.begin();
+                    j != i->buffers.end(); j++) {
+               if (j->stream->format != HAL_PIXEL_FORMAT_BLOB) {
+                   QCamera3ProcessingChannel *channel = (QCamera3ProcessingChannel *)j->stream->priv;
+                   uint32_t streamID = channel->getStreamID(channel->getStreamTypeMask());
+                   for (uint32_t k = 0; k < p_cam_frame_drop->cam_stream_ID.num_streams; k++) {
+                       if (streamID == p_cam_frame_drop->cam_stream_ID.streamID[k]) {
+                           // Send Error notify to frameworks with CAMERA3_MSG_ERROR_BUFFER
+                           ALOGW("%s: Start of reporting error frame#=%u, streamID=%u",
+                                   __func__, i->frame_number, streamID);
+                           notify_msg.type = CAMERA3_MSG_ERROR;
+                           notify_msg.message.error.frame_number = i->frame_number;
+                           notify_msg.message.error.error_code = CAMERA3_MSG_ERROR_BUFFER ;
+                           notify_msg.message.error.error_stream = j->stream;
+                           mCallbackOps->notify(mCallbackOps, &notify_msg);
+                           ALOGW("%s: End of reporting error frame#=%u, streamID=%u",
+                                  __func__, i->frame_number, streamID);
+                           PendingFrameDropInfo PendingFrameDrop;
+                           PendingFrameDrop.frame_number=i->frame_number;
+                           PendingFrameDrop.stream_ID = streamID;
+                           // Add the Frame drop info to mPendingFrameDropList
+                           mPendingFrameDropList.push_back(PendingFrameDrop);
+                      }
+                   }
+               } else {
+                   ALOGE("%s: JPEG buffer dropped for frame number %d",
+                           __func__, i->frame_number);
+               }
+            }
+        }
+
+        // Send empty metadata with already filled buffers for dropped metadata
+        // and send valid metadata with already filled buffers for current metadata
+        /* we could hit this case when we either
+         * 1. have a pending reprocess request or
+         * 2. miss a metadata buffer callback */
+        if (i->frame_number < frame_number) {
+            if (i->input_buffer) {
+                /* this will be handled in handleInputBufferWithLock */
+                i++;
+                continue;
+            } else {
+                ALOGE("%s: Fatal: Missing metadata buffer for frame number %d", __func__, i->frame_number);
+                if (free_and_bufdone_meta_buf) {
+                    mMetadataChannel->bufDone(metadata_buf);
+                    free(metadata_buf);
+                }
+                camera3_notify_msg_t notify_msg;
+                memset(&notify_msg, 0, sizeof(notify_msg));
+                notify_msg.type = CAMERA3_MSG_ERROR;
+                notify_msg.message.error.error_code = CAMERA3_MSG_ERROR_DEVICE;
+                mCallbackOps->notify(mCallbackOps, &notify_msg);
+                goto done_metadata;
+            }
+        } else {
+            mPendingLiveRequest--;
+            /* Clear notify_msg structure */
+            camera3_notify_msg_t notify_msg;
+            memset(&notify_msg, 0, sizeof(camera3_notify_msg_t));
+
+            // Send shutter notify to frameworks
+            notify_msg.type = CAMERA3_MSG_SHUTTER;
+            notify_msg.message.shutter.frame_number = i->frame_number;
+            notify_msg.message.shutter.timestamp = (uint64_t)capture_time;
+            mCallbackOps->notify(mCallbackOps, &notify_msg);
+
+            i->timestamp = capture_time;
+
+            // Find channel requiring metadata, meaning internal offline postprocess
+            // is needed.
+            //TODO: for now, we don't support two streams requiring metadata at the same time
+            // (because we are not making copies, and the metadata buffer is not reference counted).
+            bool internalPproc = false;
+            for (pendingBufferIterator iter = i->buffers.begin();
+                    iter != i->buffers.end(); iter++) {
+                if (iter->need_metadata) {
+                    internalPproc = true;
+                    QCamera3ProcessingChannel *channel =
+                            (QCamera3ProcessingChannel *)iter->stream->priv;
+                    channel->queueReprocMetadata(metadata_buf);
+                    break;
+                }
+            }
+
+            result.result = translateFromHalMetadata(metadata,
+                    i->timestamp, i->request_id, i->jpegMetadata, i->pipeline_depth,
+                    i->capture_intent, internalPproc);
+
+            saveExifParams(metadata);
+
+            if (i->blob_request) {
+                {
+                    //Dump tuning metadata if enabled and available
+                    char prop[PROPERTY_VALUE_MAX];
+                    memset(prop, 0, sizeof(prop));
+                    property_get("persist.camera.dumpmetadata", prop, "0");
+                    int32_t enabled = atoi(prop);
+                    if (enabled && metadata->is_tuning_params_valid) {
+                        dumpMetadataToFile(metadata->tuning_params,
+                               mMetaFrameCount,
+                               enabled,
+                               "Snapshot",
+                               frame_number);
+                    }
+                }
+            }
+
+            if (!internalPproc) {
+                CDBG("%s: couldn't find need_metadata for this metadata", __func__);
+                // Return metadata buffer
+                if (free_and_bufdone_meta_buf) {
+                    mMetadataChannel->bufDone(metadata_buf);
+                    free(metadata_buf);
+                }
+            }
+        }
+        if (!result.result) {
+            ALOGE("%s: metadata is NULL", __func__);
+        }
+        result.frame_number = i->frame_number;
+        result.input_buffer = i->input_buffer;
+        result.num_output_buffers = 0;
+        result.output_buffers = NULL;
+        for (List<RequestedBufferInfo>::iterator j = i->buffers.begin();
+                    j != i->buffers.end(); j++) {
+            if (j->buffer) {
+                result.num_output_buffers++;
+            }
+        }
+
+        if (result.num_output_buffers > 0) {
+            camera3_stream_buffer_t *result_buffers =
+                new camera3_stream_buffer_t[result.num_output_buffers];
+            if (!result_buffers) {
+                ALOGE("%s: Fatal error: out of memory", __func__);
+            }
+            size_t result_buffers_idx = 0;
+            for (List<RequestedBufferInfo>::iterator j = i->buffers.begin();
+                    j != i->buffers.end(); j++) {
+                if (j->buffer) {
+                    for (List<PendingFrameDropInfo>::iterator m = mPendingFrameDropList.begin();
+                            m != mPendingFrameDropList.end(); m++) {
+                        QCamera3Channel *channel = (QCamera3Channel *)j->buffer->stream->priv;
+                        uint32_t streamID = channel->getStreamID(channel->getStreamTypeMask());
+                        if((m->stream_ID == streamID) && (m->frame_number==frame_number)) {
+                            j->buffer->status=CAMERA3_BUFFER_STATUS_ERROR;
+                            ALOGW("%s: Stream STATUS_ERROR frame_number=%u, streamID=%u",
+                                  __func__, frame_number, streamID);
+                            m = mPendingFrameDropList.erase(m);
+                            break;
+                        }
+                    }
+
+                    for (List<PendingBufferInfo>::iterator k =
+                      mPendingBuffersMap.mPendingBufferList.begin();
+                      k != mPendingBuffersMap.mPendingBufferList.end(); k++) {
+                      if (k->buffer == j->buffer->buffer) {
+                        CDBG("%s: Found buffer %p in pending buffer List "
+                              "for frame %u, Take it out!!", __func__,
+                               k->buffer, k->frame_number);
+                        mPendingBuffersMap.num_buffers--;
+                        k = mPendingBuffersMap.mPendingBufferList.erase(k);
+                        break;
+                      }
+                    }
+
+                    result_buffers[result_buffers_idx++] = *(j->buffer);
+                    free(j->buffer);
+                    j->buffer = NULL;
+                }
+            }
+            result.output_buffers = result_buffers;
+            mCallbackOps->process_capture_result(mCallbackOps, &result);
+            CDBG("%s %d: meta frame_number = %u, capture_time = %lld",
+                    __func__, __LINE__, result.frame_number, i->timestamp);
+            free_camera_metadata((camera_metadata_t *)result.result);
+            delete[] result_buffers;
+        } else {
+            mCallbackOps->process_capture_result(mCallbackOps, &result);
+            CDBG("%s %d: meta frame_number = %u, capture_time = %lld",
+                        __func__, __LINE__, result.frame_number, i->timestamp);
+            free_camera_metadata((camera_metadata_t *)result.result);
+        }
+
+        i = erasePendingRequest(i);
+
+        if (!mPendingReprocessResultList.empty()) {
+            handlePendingReprocResults(frame_number + 1);
+        }
+    }
+
+done_metadata:
+    for (pendingRequestIterator i = mPendingRequestsList.begin();
+            i != mPendingRequestsList.end() ;i++) {
+        i->pipeline_depth++;
+    }
+    CDBG("%s: mPendingLiveRequest = %d", __func__, mPendingLiveRequest);
+    unblockRequestIfNecessary();
+
+}
+
+/*===========================================================================
+ * FUNCTION   : hdrPlusPerfLock
+ *
+ * DESCRIPTION: perf lock for HDR+ using custom intent
+ *
+ * PARAMETERS : @metadata_buf: Metadata super_buf pointer
+ *
+ * RETURN     : None
+ *
+ *==========================================================================*/
+void QCamera3HardwareInterface::hdrPlusPerfLock(
+        mm_camera_super_buf_t *metadata_buf)
+{
+    if (NULL == metadata_buf) {
+        ALOGE("%s: metadata_buf is NULL", __func__);
+        return;
+    }
+    metadata_buffer_t *metadata =
+            (metadata_buffer_t *)metadata_buf->bufs[0]->buffer;
+    int32_t *p_frame_number_valid =
+            POINTER_OF_META(CAM_INTF_META_FRAME_NUMBER_VALID, metadata);
+    uint32_t *p_frame_number =
+            POINTER_OF_META(CAM_INTF_META_FRAME_NUMBER, metadata);
+
+    //acquire perf lock for 5 sec after the last HDR frame is captured
+    //(guard against metadata entries that are not present)
+    if ((NULL != p_frame_number_valid) && (NULL != p_frame_number) &&
+            *p_frame_number_valid) {
+        if (mLastCustIntentFrmNum == (int32_t)*p_frame_number) {
+            m_perfLock.lock_acq_timed(HDR_PLUS_PERF_TIME_OUT);
+        }
+    }
+
+    //release lock after perf lock timer is expired. If lock is already released,
+    //isTimerReset returns false
+    if (m_perfLock.isTimerReset()) {
+        mLastCustIntentFrmNum = -1;
+        m_perfLock.lock_rel_timed();
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : handleInputBufferWithLock
+ *
+ * DESCRIPTION: Handles input buffer and shutter callback with mMutex lock held.
+ *
+ * PARAMETERS : @frame_number: frame number of the input buffer
+ *
+ * RETURN     :
+ *
+ *==========================================================================*/
+void QCamera3HardwareInterface::handleInputBufferWithLock(uint32_t frame_number)
+{
+    ATRACE_CALL();
+    pendingRequestIterator i = mPendingRequestsList.begin();
+    while (i != mPendingRequestsList.end() && i->frame_number != frame_number){
+        i++;
+    }
+    if (i != mPendingRequestsList.end() && i->input_buffer) {
+        //found the right request
+        if (!i->shutter_notified) {
+            CameraMetadata settings;
+            camera3_notify_msg_t notify_msg;
+            memset(&notify_msg, 0, sizeof(camera3_notify_msg_t));
+            nsecs_t capture_time = systemTime(CLOCK_MONOTONIC);
+            if(i->settings) {
+                settings = i->settings;
+                if (settings.exists(ANDROID_SENSOR_TIMESTAMP)) {
+                    capture_time = settings.find(ANDROID_SENSOR_TIMESTAMP).data.i64[0];
+                } else {
+                    ALOGE("%s: No timestamp in input settings! Using current one.",
+                            __func__);
+                }
+            } else {
+                ALOGE("%s: Input settings missing!", __func__);
+            }
+
+            notify_msg.type = CAMERA3_MSG_SHUTTER;
+            notify_msg.message.shutter.frame_number = frame_number;
+            notify_msg.message.shutter.timestamp = (uint64_t)capture_time;
+            mCallbackOps->notify(mCallbackOps, &notify_msg);
+            i->shutter_notified = true;
+            CDBG("%s: Input request metadata notify frame_number = %u, capture_time = %llu",
+                       __func__, i->frame_number, notify_msg.message.shutter.timestamp);
+        }
+
+        if (i->input_buffer->release_fence != -1) {
+           int32_t rc = sync_wait(i->input_buffer->release_fence, TIMEOUT_NEVER);
+           close(i->input_buffer->release_fence);
+           if (rc != OK) {
+               ALOGE("%s: input buffer sync wait failed %d", __func__, rc);
+           }
+        }
+
+        camera3_capture_result result;
+        memset(&result, 0, sizeof(camera3_capture_result));
+        result.frame_number = frame_number;
+        result.result = i->settings;
+        result.input_buffer = i->input_buffer;
+        result.partial_result = PARTIAL_RESULT_COUNT;
+
+        mCallbackOps->process_capture_result(mCallbackOps, &result);
+        CDBG("%s: Input request metadata and input buffer frame_number = %u",
+                       __func__, i->frame_number);
+        i = erasePendingRequest(i);
+    } else {
+        ALOGE("%s: Could not find input request for frame number %d", __func__, frame_number);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : handleBufferWithLock
+ *
+ * DESCRIPTION: Handles image buffer callback with mMutex lock held.
+ *
+ * PARAMETERS : @buffer: image buffer for the callback
+ *              @frame_number: frame number of the image buffer
+ *
+ * RETURN     :
+ *
+ *==========================================================================*/
+void QCamera3HardwareInterface::handleBufferWithLock(
+    camera3_stream_buffer_t *buffer, uint32_t frame_number)
+{
+    ATRACE_CALL();
+    // If the frame number doesn't exist in the pending request list,
+    // directly send the buffer to the frameworks, and update pending buffers map
+    // Otherwise, book-keep the buffer.
+    pendingRequestIterator i = mPendingRequestsList.begin();
+    while (i != mPendingRequestsList.end() && i->frame_number != frame_number){
+        i++;
+    }
+    if (i == mPendingRequestsList.end()) {
+        // Verify all pending requests frame_numbers are greater
+        for (pendingRequestIterator j = mPendingRequestsList.begin();
+                j != mPendingRequestsList.end(); j++) {
+            if ((j->frame_number < frame_number) && !(j->input_buffer)) {
+                ALOGE("%s: Error: pending live frame number %d is smaller than %d",
+                        __func__, j->frame_number, frame_number);
+            }
+        }
+        camera3_capture_result_t result;
+        memset(&result, 0, sizeof(camera3_capture_result_t));
+        result.result = NULL;
+        result.frame_number = frame_number;
+        result.num_output_buffers = 1;
+        result.partial_result = 0;
+        for (List<PendingFrameDropInfo>::iterator m = mPendingFrameDropList.begin();
+                m != mPendingFrameDropList.end(); m++) {
+            QCamera3Channel *channel = (QCamera3Channel *)buffer->stream->priv;
+            uint32_t streamID = channel->getStreamID(channel->getStreamTypeMask());
+            if((m->stream_ID == streamID) && (m->frame_number==frame_number) ) {
+                buffer->status=CAMERA3_BUFFER_STATUS_ERROR;
+                CDBG("%s: Stream STATUS_ERROR frame_number=%d, streamID=%d",
+                        __func__, frame_number, streamID);
+                m = mPendingFrameDropList.erase(m);
+                break;
+            }
+        }
+        result.output_buffers = buffer;
+        CDBG_HIGH("%s: result frame_number = %d, buffer = %p",
+                __func__, frame_number, buffer->buffer);
+
+        for (List<PendingBufferInfo>::iterator k =
+                mPendingBuffersMap.mPendingBufferList.begin();
+                k != mPendingBuffersMap.mPendingBufferList.end(); k++ ) {
+            if (k->buffer == buffer->buffer) {
+                CDBG("%s: Found Frame buffer, take it out from list",
+                        __func__);
+
+                mPendingBuffersMap.num_buffers--;
+                k = mPendingBuffersMap.mPendingBufferList.erase(k);
+                break;
+            }
+        }
+        CDBG("%s: mPendingBuffersMap.num_buffers = %d",
+            __func__, mPendingBuffersMap.num_buffers);
+
+        mCallbackOps->process_capture_result(mCallbackOps, &result);
+    } else {
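+        // The frame number is still pending: reprocess requests (those with an
+        // input buffer) are completed or cached here, while regular buffers
+        // are cached until their metadata callback arrives.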
+        if (i->input_buffer) {
+            CameraMetadata settings;
+            camera3_notify_msg_t notify_msg;
+            memset(&notify_msg, 0, sizeof(camera3_notify_msg_t));
+            nsecs_t capture_time = systemTime(CLOCK_MONOTONIC);
+            if(i->settings) {
+                settings = i->settings;
+                if (settings.exists(ANDROID_SENSOR_TIMESTAMP)) {
+                    capture_time = settings.find(ANDROID_SENSOR_TIMESTAMP).data.i64[0];
+                } else {
+                    ALOGE("%s: No timestamp in input settings! Using current one.",
+                            __func__);
+                }
+            } else {
+                ALOGE("%s: Input settings missing!", __func__);
+            }
+
+            notify_msg.type = CAMERA3_MSG_SHUTTER;
+            notify_msg.message.shutter.frame_number = frame_number;
+            notify_msg.message.shutter.timestamp = (uint64_t)capture_time;
+
+            if (i->input_buffer->release_fence != -1) {
+               int32_t rc = sync_wait(i->input_buffer->release_fence, TIMEOUT_NEVER);
+               close(i->input_buffer->release_fence);
+               if (rc != OK) {
+                   ALOGE("%s: input buffer sync wait failed %d", __func__, rc);
+               }
+            }
+
+            for (List<PendingBufferInfo>::iterator k =
+                    mPendingBuffersMap.mPendingBufferList.begin();
+                    k != mPendingBuffersMap.mPendingBufferList.end(); k++ ) {
+                if (k->buffer == buffer->buffer) {
+                    CDBG("%s: Found Frame buffer, take it out from list",
+                            __func__);
+
+                    mPendingBuffersMap.num_buffers--;
+                    k = mPendingBuffersMap.mPendingBufferList.erase(k);
+                    break;
+                }
+            }
+            CDBG("%s: mPendingBuffersMap.num_buffers = %d",
+                __func__, mPendingBuffersMap.num_buffers);
+
+            bool notifyNow = true;
+            for (pendingRequestIterator j = mPendingRequestsList.begin();
+                    j != mPendingRequestsList.end(); j++) {
+                if (j->frame_number < frame_number) {
+                    notifyNow = false;
+                    break;
+                }
+            }
+
+            if (notifyNow) {
+                camera3_capture_result result;
+                memset(&result, 0, sizeof(camera3_capture_result));
+                result.frame_number = frame_number;
+                result.result = i->settings;
+                result.input_buffer = i->input_buffer;
+                result.num_output_buffers = 1;
+                result.output_buffers = buffer;
+                result.partial_result = PARTIAL_RESULT_COUNT;
+
+                mCallbackOps->notify(mCallbackOps, &notify_msg);
+                mCallbackOps->process_capture_result(mCallbackOps, &result);
+                CDBG("%s: Notify reprocess now %d!", __func__, frame_number);
+                i = erasePendingRequest(i);
+            } else {
+                // Cache reprocess result for later
+                PendingReprocessResult pendingResult;
+                memset(&pendingResult, 0, sizeof(PendingReprocessResult));
+                pendingResult.notify_msg = notify_msg;
+                pendingResult.buffer = *buffer;
+                pendingResult.frame_number = frame_number;
+                mPendingReprocessResultList.push_back(pendingResult);
+                CDBG("%s: Cache reprocess result %d!", __func__, frame_number);
+            }
+        } else {
+            for (List<RequestedBufferInfo>::iterator j = i->buffers.begin();
+                j != i->buffers.end(); j++) {
+                if (j->stream == buffer->stream) {
+                    if (j->buffer != NULL) {
+                        ALOGE("%s: Error: buffer is already set", __func__);
+                    } else {
+                        j->buffer = (camera3_stream_buffer_t *)malloc(
+                            sizeof(camera3_stream_buffer_t));
+                        *(j->buffer) = *buffer;
+                        CDBG_HIGH("%s: cache buffer %p at result frame_number %d",
+                            __func__, buffer, frame_number);
+                    }
+                }
+            }
+        }
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : unblockRequestIfNecessary
+ *
+ * DESCRIPTION: Unblock capture_request if max_buffer hasn't been reached. Note
+ *              that mMutex is held when this function is called.
+ *
+ * PARAMETERS :
+ *
+ * RETURN     :
+ *
+ *==========================================================================*/
+void QCamera3HardwareInterface::unblockRequestIfNecessary()
+{
+   // Unblock process_capture_request
+   pthread_cond_signal(&mRequestCond);
+}
+
+
+/*===========================================================================
+ * FUNCTION   : processCaptureRequest
+ *
+ * DESCRIPTION: process a capture request from camera service
+ *
+ * PARAMETERS :
+ *   @request : request from framework to process
+ *
+ * RETURN     :
+ *
+ *==========================================================================*/
+int QCamera3HardwareInterface::processCaptureRequest(
+                    camera3_capture_request_t *request)
+{
+    ATRACE_CALL();
+    int rc = NO_ERROR;
+    int32_t request_id;
+    CameraMetadata meta;
+    uint32_t minInFlightRequests = MIN_INFLIGHT_REQUESTS;
+    uint32_t maxInFlightRequests = MAX_INFLIGHT_REQUESTS;
+    bool isVidBufRequested = false;
+    camera3_stream_buffer_t *pInputBuffer = NULL;
+
+    pthread_mutex_lock(&mMutex);
+
+    rc = validateCaptureRequest(request);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: incoming request is not valid", __func__);
+        pthread_mutex_unlock(&mMutex);
+        return rc;
+    }
+
+    meta = request->settings;
+
+    // For first capture request, send capture intent, and
+    // stream on all streams
+    if (mFirstRequest) {
+        // send an unconfigure to the backend so that the isp
+        // resources are deallocated
+        if (!mFirstConfiguration) {
+            cam_stream_size_info_t stream_config_info;
+            int32_t hal_version = CAM_HAL_V3;
+            memset(&stream_config_info, 0, sizeof(cam_stream_size_info_t));
+            stream_config_info.buffer_info.min_buffers =
+                    MIN_INFLIGHT_REQUESTS;
+            stream_config_info.buffer_info.max_buffers =
+                    m_bIs4KVideo ? 0 : MAX_INFLIGHT_REQUESTS;
+            clear_metadata_buffer(mParameters);
+            ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters,
+                    CAM_INTF_PARM_HAL_VERSION, hal_version);
+            ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters,
+                    CAM_INTF_META_STREAM_INFO, stream_config_info);
+            rc = mCameraHandle->ops->set_parms(mCameraHandle->camera_handle,
+                    mParameters);
+            if (rc < 0) {
+                ALOGE("%s: set_parms for unconfigure failed", __func__);
+                pthread_mutex_unlock(&mMutex);
+                return rc;
+            }
+        }
+        m_perfLock.lock_acq();
+        /* get eis information for stream configuration */
+        cam_is_type_t is_type;
+        char is_type_value[PROPERTY_VALUE_MAX];
+        property_get("persist.camera.is_type", is_type_value, "0");
+        is_type = static_cast<cam_is_type_t>(atoi(is_type_value));
+
+        if (meta.exists(ANDROID_CONTROL_CAPTURE_INTENT)) {
+            int32_t hal_version = CAM_HAL_V3;
+            uint8_t captureIntent =
+                meta.find(ANDROID_CONTROL_CAPTURE_INTENT).data.u8[0];
+            mCaptureIntent = captureIntent;
+            clear_metadata_buffer(mParameters);
+            ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_PARM_HAL_VERSION, hal_version);
+            ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_META_CAPTURE_INTENT, captureIntent);
+        }
+
+        //If EIS is enabled, turn it on for video
+        bool setEis = m_bEisEnable && m_bEisSupportedSize;
+        int32_t vsMode;
+        vsMode = (setEis)? DIS_ENABLE: DIS_DISABLE;
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_PARM_DIS_ENABLE, vsMode)) {
+            rc = BAD_VALUE;
+        }
+
+        //IS type will be 0 unless EIS is supported. If EIS is supported
+        //it could either be 1 or 4 depending on the stream and video size
+        if (setEis) {
+            if (!m_bEisSupportedSize) {
+                is_type = IS_TYPE_DIS;
+            } else {
+                is_type = IS_TYPE_EIS_2_0;
+            }
+            mStreamConfigInfo.is_type = is_type;
+        } else {
+            mStreamConfigInfo.is_type = IS_TYPE_NONE;
+        }
+
+        ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters,
+                CAM_INTF_META_STREAM_INFO, mStreamConfigInfo);
+        int32_t tintless_value = 1;
+        ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters,
+                CAM_INTF_PARM_TINTLESS, tintless_value);
+        //Disable CDS for HFR mode and for video mode.
+        //CDS is a session parameter in the backend/ISP, so it needs to be
+        //set/reset after every configure_stream
+        if((CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE == mOpMode) ||
+                (m_bIsVideo)) {
+            int32_t cds = CAM_CDS_MODE_OFF;
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters,
+                    CAM_INTF_PARM_CDS_MODE, cds))
+                ALOGE("%s: Failed to disable CDS for HFR mode", __func__);
+
+        }
+        setMobicat();
+
+        /* Set fps and hfr mode while sending meta stream info so that sensor
+         * can configure appropriate streaming mode */
+        mHFRVideoFps = DEFAULT_VIDEO_FPS;
+        if (meta.exists(ANDROID_CONTROL_AE_TARGET_FPS_RANGE)) {
+            rc = setHalFpsRange(meta, mParameters);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: setHalFpsRange failed", __func__);
+            }
+        }
+        if (meta.exists(ANDROID_CONTROL_MODE)) {
+            uint8_t metaMode = meta.find(ANDROID_CONTROL_MODE).data.u8[0];
+            rc = extractSceneMode(meta, metaMode, mParameters);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: extractSceneMode failed", __func__);
+            }
+        }
+
+        //TODO: validate the arguments, HSV scenemode should have only the
+        //advertised fps ranges
+
+        /* Set the capture intent, hal version, tintless, stream info,
+         * and DIS enable parameters to the backend */
+        CDBG("%s: set_parms META_STREAM_INFO ", __func__ );
+        mCameraHandle->ops->set_parms(mCameraHandle->camera_handle,
+                    mParameters);
+
+        cam_dimension_t sensor_dim;
+        memset(&sensor_dim, 0, sizeof(sensor_dim));
+        rc = getSensorOutputSize(sensor_dim);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: Failed to get sensor output size", __func__);
+            pthread_mutex_unlock(&mMutex);
+            goto error_exit;
+        }
+
+        mCropRegionMapper.update(gCamCapability[mCameraId]->active_array_size.width,
+                gCamCapability[mCameraId]->active_array_size.height,
+                sensor_dim.width, sensor_dim.height);
+
+        /* Set batch mode before initializing the channels. Since registerBuffer
+         * internally initializes some of the channels, batch mode should be set
+         * even before the first registerBuffer call */
+        for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
+            it != mStreamInfo.end(); it++) {
+            QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
+            if (((1U << CAM_STREAM_TYPE_VIDEO) == channel->getStreamTypeMask())
+                    && mBatchSize) {
+                rc = channel->setBatchSize(mBatchSize);
+                //Disable per frame map unmap for HFR/batchmode case
+                rc |= channel->setPerFrameMapUnmap(false);
+                if (NO_ERROR != rc) {
+                    ALOGE("%s : Channel init failed %d", __func__, rc);
+                    pthread_mutex_unlock(&mMutex);
+                    goto error_exit;
+                }
+            }
+        }
+
+        //First initialize all streams
+        for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
+            it != mStreamInfo.end(); it++) {
+            QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
+            if ((((1U << CAM_STREAM_TYPE_VIDEO) == channel->getStreamTypeMask()) ||
+               ((1U << CAM_STREAM_TYPE_PREVIEW) == channel->getStreamTypeMask())) &&
+               setEis)
+                rc = channel->initialize(is_type);
+            else {
+                rc = channel->initialize(IS_TYPE_NONE);
+            }
+            if (NO_ERROR != rc) {
+                ALOGE("%s : Channel initialization failed %d", __func__, rc);
+                pthread_mutex_unlock(&mMutex);
+                goto error_exit;
+            }
+        }
+
+        if (mRawDumpChannel) {
+            rc = mRawDumpChannel->initialize(IS_TYPE_NONE);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: Error: Raw Dump Channel init failed", __func__);
+                pthread_mutex_unlock(&mMutex);
+                goto error_exit;
+            }
+        }
+        if (mSupportChannel) {
+            rc = mSupportChannel->initialize(IS_TYPE_NONE);
+            if (rc < 0) {
+                ALOGE("%s: Support channel initialization failed", __func__);
+                pthread_mutex_unlock(&mMutex);
+                goto error_exit;
+            }
+        }
+        if (mAnalysisChannel) {
+            rc = mAnalysisChannel->initialize(IS_TYPE_NONE);
+            if (rc < 0) {
+                ALOGE("%s: Analysis channel initialization failed", __func__);
+                pthread_mutex_unlock(&mMutex);
+                goto error_exit;
+            }
+        }
+        if (mDummyBatchChannel) {
+            rc = mDummyBatchChannel->setBatchSize(mBatchSize);
+            if (rc < 0) {
+                ALOGE("%s: mDummyBatchChannel setBatchSize failed", __func__);
+                pthread_mutex_unlock(&mMutex);
+                goto error_exit;
+            }
+            rc = mDummyBatchChannel->initialize(is_type);
+            if (rc < 0) {
+                ALOGE("%s: mDummyBatchChannel initialization failed", __func__);
+                pthread_mutex_unlock(&mMutex);
+                goto error_exit;
+            }
+        }
+
+        // Set bundle info
+        rc = setBundleInfo();
+        if (rc < 0) {
+            ALOGE("%s: setBundleInfo failed %d", __func__, rc);
+            pthread_mutex_unlock(&mMutex);
+            goto error_exit;
+        }
+
+        //Then start them.
+        CDBG_HIGH("%s: Start META Channel", __func__);
+        rc = mMetadataChannel->start();
+        if (rc < 0) {
+            ALOGE("%s: META channel start failed", __func__);
+            pthread_mutex_unlock(&mMutex);
+            goto error_exit;
+        }
+
+        if (mAnalysisChannel) {
+            rc = mAnalysisChannel->start();
+            if (rc < 0) {
+                ALOGE("%s: Analysis channel start failed", __func__);
+                mMetadataChannel->stop();
+                pthread_mutex_unlock(&mMutex);
+                goto error_exit;
+            }
+        }
+
+        if (mSupportChannel) {
+            rc = mSupportChannel->start();
+            if (rc < 0) {
+                ALOGE("%s: Support channel start failed", __func__);
+                mMetadataChannel->stop();
+                /* Although support and analysis are mutually exclusive today,
+                   the stop is added in any case for future proofing */
+                if (mAnalysisChannel) {
+                    mAnalysisChannel->stop();
+                }
+                pthread_mutex_unlock(&mMutex);
+                goto error_exit;
+            }
+        }
+        for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
+            it != mStreamInfo.end(); it++) {
+            QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
+            CDBG_HIGH("%s: Start Processing Channel mask=%d",
+                    __func__, channel->getStreamTypeMask());
+            rc = channel->start();
+            if (rc < 0) {
+                ALOGE("%s: channel start failed", __func__);
+                pthread_mutex_unlock(&mMutex);
+                goto error_exit;
+            }
+        }
+
+        if (mRawDumpChannel) {
+            CDBG("%s: Starting raw dump stream",__func__);
+            rc = mRawDumpChannel->start();
+            if (rc != NO_ERROR) {
+                ALOGE("%s: Error Starting Raw Dump Channel", __func__);
+                for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
+                      it != mStreamInfo.end(); it++) {
+                    QCamera3Channel *channel =
+                        (QCamera3Channel *)(*it)->stream->priv;
+                    ALOGE("%s: Stopping Processing Channel mask=%d", __func__,
+                        channel->getStreamTypeMask());
+                    channel->stop();
+                }
+                if (mSupportChannel)
+                    mSupportChannel->stop();
+                if (mAnalysisChannel) {
+                    mAnalysisChannel->stop();
+                }
+                mMetadataChannel->stop();
+                pthread_mutex_unlock(&mMutex);
+                goto error_exit;
+            }
+        }
+
+        if (mChannelHandle) {
+
+            rc = mCameraHandle->ops->start_channel(mCameraHandle->camera_handle,
+                    mChannelHandle);
+            if (rc != NO_ERROR) {
+                ALOGE("%s: start_channel failed %d", __func__, rc);
+                pthread_mutex_unlock(&mMutex);
+                goto error_exit;
+            }
+        }
+
+
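+        /* Both paths below release the perf lock acquired for the first
+         * request; only the error path returns early. */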
+        goto no_error;
+error_exit:
+        m_perfLock.lock_rel();
+        return rc;
+no_error:
+        m_perfLock.lock_rel();
+
+        mWokenUpByDaemon = false;
+        mPendingLiveRequest = 0;
+        mFirstConfiguration = false;
+        enablePowerHint();
+    }
+
+    uint32_t frameNumber = request->frame_number;
+    cam_stream_ID_t streamID;
+
+    if (meta.exists(ANDROID_REQUEST_ID)) {
+        request_id = meta.find(ANDROID_REQUEST_ID).data.i32[0];
+        mCurrentRequestId = request_id;
+        CDBG("%s: Received request with id: %d",__func__, request_id);
+    } else if (mFirstRequest || mCurrentRequestId == -1){
+        ALOGE("%s: Unable to find request id field, \
+                & no previous id available", __func__);
+        pthread_mutex_unlock(&mMutex);
+        return NAME_NOT_FOUND;
+    } else {
+        CDBG("%s: Re-using old request id", __func__);
+        request_id = mCurrentRequestId;
+    }
+
+    CDBG_HIGH("%s: %d, num_output_buffers = %d input_buffer = %p frame_number = %d",
+                                    __func__, __LINE__,
+                                    request->num_output_buffers,
+                                    request->input_buffer,
+                                    frameNumber);
+    // Acquire all request buffers first
+    streamID.num_streams = 0;
+    int blob_request = 0;
+    uint32_t snapshotStreamId = 0;
+    for (size_t i = 0; i < request->num_output_buffers; i++) {
+        const camera3_stream_buffer_t& output = request->output_buffers[i];
+        QCamera3Channel *channel = (QCamera3Channel *)output.stream->priv;
+
+        if (output.stream->format == HAL_PIXEL_FORMAT_BLOB) {
+            //Call function to store local copy of jpeg data for encode params.
+            blob_request = 1;
+            snapshotStreamId = channel->getStreamID(channel->getStreamTypeMask());
+        }
+
+        if (output.acquire_fence != -1) {
+           rc = sync_wait(output.acquire_fence, TIMEOUT_NEVER);
+           close(output.acquire_fence);
+           if (rc != OK) {
+              ALOGE("%s: sync wait failed %d", __func__, rc);
+              pthread_mutex_unlock(&mMutex);
+              return rc;
+           }
+        }
+
+        streamID.streamID[streamID.num_streams] =
+            channel->getStreamID(channel->getStreamTypeMask());
+        streamID.num_streams++;
+
+        if ((1U << CAM_STREAM_TYPE_VIDEO) == channel->getStreamTypeMask()) {
+            isVidBufRequested = true;
+        }
+    }
+
+    if (blob_request && mRawDumpChannel) {
+        CDBG("%s: Trigger Raw based on blob request if Raw dump is enabled", __func__);
+        streamID.streamID[streamID.num_streams] =
+            mRawDumpChannel->getStreamID(mRawDumpChannel->getStreamTypeMask());
+        streamID.num_streams++;
+    }
+
+    if(request->input_buffer == NULL) {
+        /* Parse the settings:
+         * - For every request in NORMAL MODE
+         * - For every request in HFR mode during preview only case
+         * - For the first request of every batch in HFR mode during video
+         *   recording. In batch mode the same settings, except the frame
+         *   number, are repeated in each request of the batch.
+         */
+        if (!mBatchSize ||
+           (mBatchSize && !isVidBufRequested) ||
+           (mBatchSize && isVidBufRequested && !mToBeQueuedVidBufs)) {
+            rc = setFrameParameters(request, streamID, blob_request, snapshotStreamId);
+            if (rc < 0) {
+                ALOGE("%s: fail to set frame parameters", __func__);
+                pthread_mutex_unlock(&mMutex);
+                return rc;
+            }
+        }
+        /* For batch mode HFR, setFrameParameters is not called for every
+         * request; only the frame number of the latest request is parsed.
+         * Keep track of the first and last frame numbers in a batch so that
+         * metadata for the frame numbers of the batch can be duplicated in
+         * handleBatchMetadata */
+        if (mBatchSize) {
+            if (!mToBeQueuedVidBufs) {
+                //start of the batch
+                mFirstFrameNumberInBatch = request->frame_number;
+            }
+            if(ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters,
+                CAM_INTF_META_FRAME_NUMBER, request->frame_number)) {
+                ALOGE("%s: Failed to set the frame number in the parameters", __func__);
+                return BAD_VALUE;
+            }
+        }
+        if (mNeedSensorRestart) {
+            /* Unlock the mutex as restartSensor waits on the channels to be
+             * stopped, which in turn calls stream callback functions -
+             * handleBufferWithLock and handleMetadataWithLock */
+            pthread_mutex_unlock(&mMutex);
+            rc = dynamicUpdateMetaStreamInfo();
+            if (rc != NO_ERROR) {
+                ALOGE("%s: Restarting the sensor failed", __func__);
+                return BAD_VALUE;
+            }
+            mNeedSensorRestart = false;
+            pthread_mutex_lock(&mMutex);
+        }
+    } else {
+
+        if (request->input_buffer->acquire_fence != -1) {
+           rc = sync_wait(request->input_buffer->acquire_fence, TIMEOUT_NEVER);
+           close(request->input_buffer->acquire_fence);
+           if (rc != OK) {
+              ALOGE("%s: input buffer sync wait failed %d", __func__, rc);
+              pthread_mutex_unlock(&mMutex);
+              return rc;
+           }
+        }
+    }
+
+    if (mCaptureIntent == ANDROID_CONTROL_CAPTURE_INTENT_CUSTOM) {
+        mLastCustIntentFrmNum = frameNumber;
+    }
+    /* Update pending request list and pending buffers map */
+    PendingRequestInfo pendingRequest;
+    pendingRequestIterator latestRequest;
+    pendingRequest.frame_number = frameNumber;
+    pendingRequest.num_buffers = request->num_output_buffers;
+    pendingRequest.request_id = request_id;
+    pendingRequest.blob_request = blob_request;
+    pendingRequest.timestamp = 0;
+    pendingRequest.bUrgentReceived = 0;
+    if (request->input_buffer) {
+        pendingRequest.input_buffer =
+                (camera3_stream_buffer_t*)malloc(sizeof(camera3_stream_buffer_t));
+        *(pendingRequest.input_buffer) = *(request->input_buffer);
+        pInputBuffer = pendingRequest.input_buffer;
+    } else {
+       pendingRequest.input_buffer = NULL;
+       pInputBuffer = NULL;
+    }
+
+    pendingRequest.pipeline_depth = 0;
+    pendingRequest.partial_result_cnt = 0;
+    extractJpegMetadata(mCurJpegMeta, request);
+    pendingRequest.jpegMetadata = mCurJpegMeta;
+    pendingRequest.settings = saveRequestSettings(mCurJpegMeta, request);
+    pendingRequest.shutter_notified = false;
+
+    //extract capture intent
+    if (meta.exists(ANDROID_CONTROL_CAPTURE_INTENT)) {
+        mCaptureIntent =
+                meta.find(ANDROID_CONTROL_CAPTURE_INTENT).data.u8[0];
+    }
+    pendingRequest.capture_intent = mCaptureIntent;
+
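+    /* Track every output buffer of this request so that results and errors
+     * can later be matched back to the right stream and frame number. */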
+    for (size_t i = 0; i < request->num_output_buffers; i++) {
+        RequestedBufferInfo requestedBuf;
+        memset(&requestedBuf, 0, sizeof(requestedBuf));
+        requestedBuf.stream = request->output_buffers[i].stream;
+        requestedBuf.buffer = NULL;
+        pendingRequest.buffers.push_back(requestedBuf);
+
+        // Add the buffer handle to the pending buffers list
+        PendingBufferInfo bufferInfo;
+        bufferInfo.frame_number = frameNumber;
+        bufferInfo.buffer = request->output_buffers[i].buffer;
+        bufferInfo.stream = request->output_buffers[i].stream;
+        mPendingBuffersMap.mPendingBufferList.push_back(bufferInfo);
+        mPendingBuffersMap.num_buffers++;
+        QCamera3Channel *channel = (QCamera3Channel *)bufferInfo.stream->priv;
+        CDBG("%s: frame = %d, buffer = %p, streamTypeMask = %d, stream format = %d",
+                __func__, frameNumber, bufferInfo.buffer,
+                channel->getStreamTypeMask(), bufferInfo.stream->format);
+    }
+    latestRequest = mPendingRequestsList.insert(
+            mPendingRequestsList.end(), pendingRequest);
+    if(mFlush) {
+        pthread_mutex_unlock(&mMutex);
+        return NO_ERROR;
+    }
+
+    // Notify metadata channel we receive a request
+    mMetadataChannel->request(NULL, frameNumber);
+
+    if(request->input_buffer != NULL){
+        CDBG("%s: Input request, frame_number %d", __func__, frameNumber);
+        rc = setReprocParameters(request, &mReprocMeta, snapshotStreamId);
+        if (NO_ERROR != rc) {
+            ALOGE("%s: fail to set reproc parameters", __func__);
+            pthread_mutex_unlock(&mMutex);
+            return rc;
+        }
+    }
+
+    // Call request on other streams
+    uint32_t streams_need_metadata = 0;
+    pendingBufferIterator pendingBufferIter = latestRequest->buffers.begin();
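+    /* Dispatch buffer-by-buffer: BLOB streams go to the picture channel
+     * (with reprocess metadata when an input buffer is present), YUV_420_888
+     * streams may additionally ask for HAL metadata, and all other streams
+     * take a plain buffer request. */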
+    for (size_t i = 0; i < request->num_output_buffers; i++) {
+        const camera3_stream_buffer_t& output = request->output_buffers[i];
+        QCamera3Channel *channel = (QCamera3Channel *)output.stream->priv;
+
+        if (channel == NULL) {
+            ALOGE("%s: invalid channel pointer for stream", __func__);
+            continue;
+        }
+
+        if (output.stream->format == HAL_PIXEL_FORMAT_BLOB) {
+            if(request->input_buffer != NULL){
+                rc = channel->request(output.buffer, frameNumber,
+                        pInputBuffer, &mReprocMeta);
+                if (rc < 0) {
+                    ALOGE("%s: Fail to request on picture channel", __func__);
+                    pthread_mutex_unlock(&mMutex);
+                    return rc;
+                }
+            } else {
+                CDBG("%s: %d, snapshot request with buffer %p, frame_number %d", __func__,
+                        __LINE__, output.buffer, frameNumber);
+                if (!request->settings) {
+                    rc = channel->request(output.buffer, frameNumber,
+                            NULL, mPrevParameters);
+                } else {
+                    rc = channel->request(output.buffer, frameNumber,
+                            NULL, mParameters);
+                }
+                if (rc < 0) {
+                    ALOGE("%s: Fail to request on picture channel", __func__);
+                    pthread_mutex_unlock(&mMutex);
+                    return rc;
+                }
+                pendingBufferIter->need_metadata = true;
+                streams_need_metadata++;
+            }
+        } else if (output.stream->format == HAL_PIXEL_FORMAT_YCbCr_420_888) {
+            bool needMetadata = false;
+            QCamera3YUVChannel *yuvChannel = (QCamera3YUVChannel *)channel;
+            rc = yuvChannel->request(output.buffer, frameNumber,
+                    pInputBuffer,
+                    (pInputBuffer ? &mReprocMeta : mParameters), needMetadata);
+            if (rc < 0) {
+                ALOGE("%s: Fail to request on YUV channel", __func__);
+                pthread_mutex_unlock(&mMutex);
+                return rc;
+            }
+            pendingBufferIter->need_metadata = needMetadata;
+            if (needMetadata)
+                streams_need_metadata += 1;
+            CDBG("%s: calling YUV channel request, need_metadata is %d",
+                    __func__, needMetadata);
+        } else {
+            CDBG("%s: %d, request with buffer %p, frame_number %d", __func__,
+                __LINE__, output.buffer, frameNumber);
+            rc = channel->request(output.buffer, frameNumber);
+            if (((1U << CAM_STREAM_TYPE_VIDEO) == channel->getStreamTypeMask())
+                    && mBatchSize) {
+                mToBeQueuedVidBufs++;
+                if (mToBeQueuedVidBufs == mBatchSize) {
+                    channel->queueBatchBuf();
+                }
+            }
+            if (rc < 0) {
+                ALOGE("%s: request failed", __func__);
+                pthread_mutex_unlock(&mMutex);
+                return rc;
+            }
+        }
+        pendingBufferIter++;
+    }
+
+    //If 2 streams have need_metadata set to true, fail the request, unless
+    //we copy/reference count the metadata buffer
+    if (streams_need_metadata > 1) {
+        ALOGE("%s: not supporting request in which two streams requires"
+                " 2 HAL metadata for reprocessing", __func__);
+        pthread_mutex_unlock(&mMutex);
+        return -EINVAL;
+    }
+
+    if(request->input_buffer == NULL) {
+        /* Set the parameters to backend:
+         * - For every request in NORMAL MODE
+         * - For every request in HFR mode during preview only case
+         * - Once every batch in HFR mode during video recording
+         */
+        if (!mBatchSize ||
+           (mBatchSize && !isVidBufRequested) ||
+           (mBatchSize && isVidBufRequested && (mToBeQueuedVidBufs == mBatchSize))) {
+            CDBG("%s: set_parms  batchSz: %d IsVidBufReq: %d vidBufTobeQd: %d ",
+                    __func__, mBatchSize, isVidBufRequested,
+                    mToBeQueuedVidBufs);
+            rc = mCameraHandle->ops->set_parms(mCameraHandle->camera_handle,
+                    mParameters);
+            if (rc < 0) {
+                ALOGE("%s: set_parms failed", __func__);
+            }
+            /* reset to zero because the batch is queued */
+            mToBeQueuedVidBufs = 0;
+            mPendingBatchMap.add(frameNumber, mFirstFrameNumberInBatch);
+        }
+        mPendingLiveRequest++;
+    }
+
+    CDBG("%s: mPendingLiveRequest = %d", __func__, mPendingLiveRequest);
+
+    mFirstRequest = false;
+    // Added a timed condition wait
+    struct timespec ts;
+    uint8_t isValidTimeout = 1;
+    rc = clock_gettime(CLOCK_REALTIME, &ts);
+    if (rc < 0) {
+      isValidTimeout = 0;
+      ALOGE("%s: Error reading the real time clock!!", __func__);
+    }
+    else {
+      // Use a 5 second timeout for the request to be honored
+      ts.tv_sec += 5;
+    }
+    //Block on conditional variable
+    if (mBatchSize) {
+        /* For HFR, more buffers are dequeued upfront to improve the performance */
+        minInFlightRequests = MIN_INFLIGHT_HFR_REQUESTS;
+        maxInFlightRequests = MAX_INFLIGHT_HFR_REQUESTS;
+    }
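+    /* Back-pressure: block this request until the number of in-flight
+     * requests drops below minInFlightRequests, the daemon wakes us up with
+     * room below maxInFlightRequests, or the timed wait expires (in which
+     * case the request fails with -ENODEV). Reprocess requests with an input
+     * buffer are never blocked here. */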
+    while ((mPendingLiveRequest >= minInFlightRequests) && !pInputBuffer) {
+        if (!isValidTimeout) {
+            CDBG("%s: Blocking on conditional wait", __func__);
+            pthread_cond_wait(&mRequestCond, &mMutex);
+        }
+        else {
+            CDBG("%s: Blocking on timed conditional wait", __func__);
+            rc = pthread_cond_timedwait(&mRequestCond, &mMutex, &ts);
+            if (rc == ETIMEDOUT) {
+                rc = -ENODEV;
+                ALOGE("%s: Unblocked on timeout!!!!", __func__);
+                break;
+            }
+        }
+        CDBG("%s: Unblocked", __func__);
+        if (mWokenUpByDaemon) {
+            mWokenUpByDaemon = false;
+            if (mPendingLiveRequest < maxInFlightRequests)
+                break;
+        }
+    }
+    pthread_mutex_unlock(&mMutex);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : dump
+ *
+ * DESCRIPTION: Dump HAL3 state (pending requests, pending buffers and the
+ *              pending frame drop list) to the given file descriptor
+ *
+ * PARAMETERS :
+ *   @fd      : file descriptor to write the dump to
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCamera3HardwareInterface::dump(int fd)
+{
+    pthread_mutex_lock(&mMutex);
+    dprintf(fd, "\n Camera HAL3 information Begin \n");
+
+    dprintf(fd, "\nNumber of pending requests: %zu \n",
+        mPendingRequestsList.size());
+    dprintf(fd, "-------+-------------------+-------------+----------+---------------------\n");
+    dprintf(fd, " Frame | Number of Buffers |   Req Id:   | Blob Req | Input buffer present\n");
+    dprintf(fd, "-------+-------------------+-------------+----------+---------------------\n");
+    for(pendingRequestIterator i = mPendingRequestsList.begin();
+            i != mPendingRequestsList.end(); i++) {
+        dprintf(fd, " %5d | %17d | %11d | %8d | %p \n",
+        i->frame_number, i->num_buffers, i->request_id, i->blob_request,
+        i->input_buffer);
+    }
+    dprintf(fd, "\nPending buffer map: Number of buffers: %u\n",
+                mPendingBuffersMap.num_buffers);
+    dprintf(fd, "-------+------------------\n");
+    dprintf(fd, " Frame | Stream type mask \n");
+    dprintf(fd, "-------+------------------\n");
+    for(List<PendingBufferInfo>::iterator i =
+        mPendingBuffersMap.mPendingBufferList.begin();
+        i != mPendingBuffersMap.mPendingBufferList.end(); i++) {
+        QCamera3Channel *channel = (QCamera3Channel *)(i->stream->priv);
+        dprintf(fd, " %5d | %11d \n",
+                i->frame_number, channel->getStreamTypeMask());
+    }
+    dprintf(fd, "-------+------------------\n");
+
+    dprintf(fd, "\nPending frame drop list: %zu\n",
+        mPendingFrameDropList.size());
+    dprintf(fd, "-------+-----------\n");
+    dprintf(fd, " Frame | Stream ID \n");
+    dprintf(fd, "-------+-----------\n");
+    for(List<PendingFrameDropInfo>::iterator i = mPendingFrameDropList.begin();
+        i != mPendingFrameDropList.end(); i++) {
+        dprintf(fd, " %5d | %9d \n",
+            i->frame_number, i->stream_ID);
+    }
+    dprintf(fd, "-------+-----------\n");
+
+    dprintf(fd, "\n Camera HAL3 information End \n");
+
+    /* use dumpsys media.camera as trigger to send update debug level event */
+    mUpdateDebugLevel = true;
+    pthread_mutex_unlock(&mMutex);
+    return;
+}
+
+/*===========================================================================
+ * FUNCTION   : flush
+ *
+ * DESCRIPTION: Stop all channels, return pending requests with errors and
+ *              restart the channels so new requests can be accepted
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : int type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3HardwareInterface::flush()
+{
+    ATRACE_CALL();
+    int32_t rc = NO_ERROR;
+
+    CDBG("%s: Unblocking Process Capture Request", __func__);
+    pthread_mutex_lock(&mMutex);
+
+    if (mFirstRequest) {
+        pthread_mutex_unlock(&mMutex);
+        return NO_ERROR;
+    }
+
+    mFlush = true;
+    pthread_mutex_unlock(&mMutex);
+
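+    /* Channels are stopped without holding mMutex, since draining buffers
+     * may invoke the stream callbacks, which take the lock themselves (see
+     * the similar note near the sensor restart in processCaptureRequest). */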
+    rc = stopAllChannels();
+    if (rc < 0) {
+        ALOGE("%s: stopAllChannels failed", __func__);
+        return rc;
+    }
+    if (mChannelHandle) {
+        mCameraHandle->ops->stop_channel(mCameraHandle->camera_handle,
+                mChannelHandle);
+    }
+
+    // Reset bundle info
+    rc = setBundleInfo();
+    if (rc < 0) {
+        ALOGE("%s: setBundleInfo failed %d", __func__, rc);
+        return rc;
+    }
+
+    // Mutex Lock
+    pthread_mutex_lock(&mMutex);
+
+    // Unblock process_capture_request
+    mPendingLiveRequest = 0;
+    pthread_cond_signal(&mRequestCond);
+
+    rc = notifyErrorForPendingRequests();
+    if (rc < 0) {
+        ALOGE("%s: notifyErrorForPendingRequests failed", __func__);
+        pthread_mutex_unlock(&mMutex);
+        return rc;
+    }
+
+    mFlush = false;
+
+    // Start the Streams/Channels
+    rc = startAllChannels();
+    if (rc < 0) {
+        ALOGE("%s: startAllChannels failed", __func__);
+        pthread_mutex_unlock(&mMutex);
+        return rc;
+    }
+
+    if (mChannelHandle) {
+        mCameraHandle->ops->start_channel(mCameraHandle->camera_handle,
+                    mChannelHandle);
+        if (rc < 0) {
+            ALOGE("%s: start_channel failed", __func__);
+            pthread_mutex_unlock(&mMutex);
+            return rc;
+        }
+    }
+
+    pthread_mutex_unlock(&mMutex);
+
+    return 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : captureResultCb
+ *
+ * DESCRIPTION: Callback handler for all capture result
+ *              (streams, as well as metadata)
+ *
+ * PARAMETERS :
+ *   @metadata : metadata information
+ *   @buffer   : actual gralloc buffer to be returned to frameworks.
+ *               NULL if metadata.
+ *
+ * RETURN     : NONE
+ *==========================================================================*/
+void QCamera3HardwareInterface::captureResultCb(mm_camera_super_buf_t *metadata_buf,
+                camera3_stream_buffer_t *buffer, uint32_t frame_number, bool isInputBuffer)
+{
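+    /* Route the callback: metadata goes through the batch or regular path
+     * depending on the operating mode, while buffer and input-buffer
+     * completions are handled under mMutex. */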
+    if (metadata_buf) {
+        if (CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE == mOpMode) {
+            handleBatchMetadata(metadata_buf,
+                    true /* free_and_bufdone_meta_buf */);
+        } else { /* mBatchSize = 0 */
+            hdrPlusPerfLock(metadata_buf);
+            pthread_mutex_lock(&mMutex);
+            handleMetadataWithLock(metadata_buf,
+                    true /* free_and_bufdone_meta_buf */);
+            pthread_mutex_unlock(&mMutex);
+        }
+    } else if (isInputBuffer) {
+        pthread_mutex_lock(&mMutex);
+        handleInputBufferWithLock(frame_number);
+        pthread_mutex_unlock(&mMutex);
+    } else {
+        pthread_mutex_lock(&mMutex);
+        handleBufferWithLock(buffer, frame_number);
+        pthread_mutex_unlock(&mMutex);
+    }
+    return;
+}
+
+/*===========================================================================
+ * FUNCTION   : getReprocessibleOutputStreamId
+ *
+ * DESCRIPTION: Get source output stream id for the input reprocess stream
+ *              based on size and format, which would be the largest
+ *              output stream if an input stream exists.
+ *
+ * PARAMETERS :
+ *   @id      : return the stream id if found
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3HardwareInterface::getReprocessibleOutputStreamId(uint32_t &id)
+{
+    stream_info_t* stream = NULL;
+
+    /* Check for any output or bidirectional stream with the same size and
+       format as the input stream and return that stream */
+    if ((mInputStreamInfo.dim.width > 0) &&
+            (mInputStreamInfo.dim.height > 0)) {
+        for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
+                it != mStreamInfo.end(); it++) {
+
+            camera3_stream_t *stream = (*it)->stream;
+            if ((stream->width == (uint32_t)mInputStreamInfo.dim.width) &&
+                    (stream->height == (uint32_t)mInputStreamInfo.dim.height) &&
+                    (stream->format == mInputStreamInfo.format)) {
+                // Usage flag for an input stream and the source output stream
+                // may be different.
+                CDBG("%s: Found reprocessible output stream! %p", __func__, *it);
+                CDBG("%s: input stream usage 0x%x, current stream usage 0x%x",
+                        __func__, stream->usage, mInputStreamInfo.usage);
+
+                QCamera3Channel *channel = (QCamera3Channel *)stream->priv;
+                if (channel != NULL && channel->mStreams[0]) {
+                    id = channel->mStreams[0]->getMyServerID();
+                    return NO_ERROR;
+                }
+            }
+        }
+    } else {
+        CDBG("%s: No input stream, so no reprocessible output stream", __func__);
+    }
+    return NAME_NOT_FOUND;
+}
+
+/*===========================================================================
+ * FUNCTION   : lookupFwkName
+ *
+ * DESCRIPTION: In case the enum is not the same in the framework and backend,
+ *              make sure the parameter is correctly propagated
+ *
+ * PARAMETERS  :
+ *   @arr      : map between the two enums
+ *   @len      : len of the map
+ *   @hal_name : name of the hal_parm to map
+ *
+ * RETURN     : int type of status
+ *              fwk_name  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+template <typename halType, class mapType> int lookupFwkName(const mapType *arr,
+        size_t len, halType hal_name)
+{
+
+    for (size_t i = 0; i < len; i++) {
+        if (arr[i].hal_name == hal_name) {
+            return arr[i].fwk_name;
+        }
+    }
+
+    /* Not being able to find a matching framework type is not necessarily
+     * an error. This happens when mm-camera supports more attributes
+     * than the framework does */
+    CDBG_HIGH("%s: Cannot find matching framework type", __func__);
+    return NAME_NOT_FOUND;
+}
+
+/*===========================================================================
+ * FUNCTION   : lookupHalName
+ *
+ * DESCRIPTION: In case the enum is not the same in the framework and backend,
+ *              make sure the parameter is correctly propagated
+ *
+ * PARAMETERS  :
+ *   @arr      : map between the two enums
+ *   @len      : len of the map
+ *   @fwk_name : name of the framework parameter to map
+ *
+ * RETURN     : int32_t type of status
+ *              hal_name  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+template <typename fwkType, class mapType> int lookupHalName(const mapType *arr,
+        size_t len, fwkType fwk_name)
+{
+    for (size_t i = 0; i < len; i++) {
+        if (arr[i].fwk_name == fwk_name) {
+            return arr[i].hal_name;
+        }
+    }
+
+    ALOGE("%s: Cannot find matching hal type fwk_name=%d", __func__, fwk_name);
+    return NAME_NOT_FOUND;
+}
+
+/*===========================================================================
+ * FUNCTION   : lookupProp
+ *
+ * DESCRIPTION: lookup a value by its name
+ *
+ * PARAMETERS :
+ *   @arr     : map between the two enums
+ *   @len     : size of the map
+ *   @name    : name to be looked up
+ *
+ * RETURN     : Value if found
+ *              CAM_CDS_MODE_MAX if not found
+ *==========================================================================*/
+template <class mapType> cam_cds_mode_type_t lookupProp(const mapType *arr,
+        size_t len, const char *name)
+{
+    if (name) {
+        for (size_t i = 0; i < len; i++) {
+            if (!strcmp(arr[i].desc, name)) {
+                return arr[i].val;
+            }
+        }
+    }
+    return CAM_CDS_MODE_MAX;
+}
+
+/*===========================================================================
+ * FUNCTION   : translateFromHalMetadata
+ *
+ * DESCRIPTION: Translate metadata received from the HAL/backend into the
+ *              format expected by the framework
+ *
+ * PARAMETERS :
+ *   @metadata : metadata information from callback
+ *   @timestamp: metadata buffer timestamp
+ *   @request_id: request id
+ *   @jpegMetadata: additional jpeg metadata
+ *   @pipeline_depth: pipeline depth for this request
+ *   @capture_intent: capture intent for this request
+ *   @pprocDone: whether internal offline postprocessing is done
+ *
+ * RETURN     : camera_metadata_t*
+ *              metadata in a format specified by fwk
+ *==========================================================================*/
+camera_metadata_t*
+QCamera3HardwareInterface::translateFromHalMetadata(
+                                 metadata_buffer_t *metadata,
+                                 nsecs_t timestamp,
+                                 int32_t request_id,
+                                 const CameraMetadata& jpegMetadata,
+                                 uint8_t pipeline_depth,
+                                 uint8_t capture_intent,
+                                 bool pprocDone)
+{
+    CameraMetadata camMetadata;
+    camera_metadata_t *resultMetadata;
+
+    if (jpegMetadata.entryCount())
+        camMetadata.append(jpegMetadata);
+
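+    /* Result entries that are always present: timestamp, request id,
+     * pipeline depth and capture intent come straight from the arguments. */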
+    camMetadata.update(ANDROID_SENSOR_TIMESTAMP, &timestamp, 1);
+    camMetadata.update(ANDROID_REQUEST_ID, &request_id, 1);
+    camMetadata.update(ANDROID_REQUEST_PIPELINE_DEPTH, &pipeline_depth, 1);
+    camMetadata.update(ANDROID_CONTROL_CAPTURE_INTENT, &capture_intent, 1);
+
+    IF_META_AVAILABLE(uint32_t, frame_number, CAM_INTF_META_FRAME_NUMBER, metadata) {
+        int64_t fwk_frame_number = *frame_number;
+        camMetadata.update(ANDROID_SYNC_FRAME_NUMBER, &fwk_frame_number, 1);
+    }
+
+    IF_META_AVAILABLE(cam_fps_range_t, float_range, CAM_INTF_PARM_FPS_RANGE, metadata) {
+        int32_t fps_range[2];
+        fps_range[0] = (int32_t)float_range->min_fps;
+        fps_range[1] = (int32_t)float_range->max_fps;
+        camMetadata.update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+                                      fps_range, 2);
+        CDBG("%s: urgent Metadata : ANDROID_CONTROL_AE_TARGET_FPS_RANGE [%d, %d]",
+            __func__, fps_range[0], fps_range[1]);
+    }
+
+    IF_META_AVAILABLE(int32_t, expCompensation, CAM_INTF_PARM_EXPOSURE_COMPENSATION, metadata) {
+        camMetadata.update(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION, expCompensation, 1);
+    }
+
+    IF_META_AVAILABLE(uint32_t, sceneMode, CAM_INTF_PARM_BESTSHOT_MODE, metadata) {
+        int val = (uint8_t)lookupFwkName(SCENE_MODES_MAP,
+                METADATA_MAP_SIZE(SCENE_MODES_MAP),
+                *sceneMode);
+        if (NAME_NOT_FOUND != val) {
+            uint8_t fwkSceneMode = (uint8_t)val;
+            camMetadata.update(ANDROID_CONTROL_SCENE_MODE, &fwkSceneMode, 1);
+            CDBG("%s: urgent Metadata : ANDROID_CONTROL_SCENE_MODE: %d",
+                    __func__, fwkSceneMode);
+        }
+    }
+
+    IF_META_AVAILABLE(uint32_t, ae_lock, CAM_INTF_PARM_AEC_LOCK, metadata) {
+        uint8_t fwk_ae_lock = (uint8_t) *ae_lock;
+        camMetadata.update(ANDROID_CONTROL_AE_LOCK, &fwk_ae_lock, 1);
+    }
+
+    IF_META_AVAILABLE(uint32_t, awb_lock, CAM_INTF_PARM_AWB_LOCK, metadata) {
+        uint8_t fwk_awb_lock = (uint8_t) *awb_lock;
+        camMetadata.update(ANDROID_CONTROL_AWB_LOCK, &fwk_awb_lock, 1);
+    }
+
+    IF_META_AVAILABLE(uint32_t, color_correct_mode, CAM_INTF_META_COLOR_CORRECT_MODE, metadata) {
+        uint8_t fwk_color_correct_mode = (uint8_t) *color_correct_mode;
+        camMetadata.update(ANDROID_COLOR_CORRECTION_MODE, &fwk_color_correct_mode, 1);
+    }
+
+    IF_META_AVAILABLE(cam_edge_application_t, edgeApplication,
+            CAM_INTF_META_EDGE_MODE, metadata) {
+        uint8_t edgeStrength = (uint8_t) edgeApplication->sharpness;
+        camMetadata.update(ANDROID_EDGE_MODE, &(edgeApplication->edge_mode), 1);
+    }
+
+    IF_META_AVAILABLE(uint32_t, flashPower, CAM_INTF_META_FLASH_POWER, metadata) {
+        uint8_t fwk_flashPower = (uint8_t) *flashPower;
+        camMetadata.update(ANDROID_FLASH_FIRING_POWER, &fwk_flashPower, 1);
+    }
+
+    IF_META_AVAILABLE(int64_t, flashFiringTime, CAM_INTF_META_FLASH_FIRING_TIME, metadata) {
+        camMetadata.update(ANDROID_FLASH_FIRING_TIME, flashFiringTime, 1);
+    }
+
+    IF_META_AVAILABLE(int32_t, flashState, CAM_INTF_META_FLASH_STATE, metadata) {
+        if (0 <= *flashState) {
+            uint8_t fwk_flashState = (uint8_t) *flashState;
+            if (!gCamCapability[mCameraId]->flash_available) {
+                fwk_flashState = ANDROID_FLASH_STATE_UNAVAILABLE;
+            }
+            camMetadata.update(ANDROID_FLASH_STATE, &fwk_flashState, 1);
+        }
+    }
+
+    IF_META_AVAILABLE(uint32_t, flashMode, CAM_INTF_META_FLASH_MODE, metadata) {
+        int val = lookupFwkName(FLASH_MODES_MAP, METADATA_MAP_SIZE(FLASH_MODES_MAP), *flashMode);
+        if (NAME_NOT_FOUND != val) {
+            uint8_t fwk_flashMode = (uint8_t)val;
+            camMetadata.update(ANDROID_FLASH_MODE, &fwk_flashMode, 1);
+        }
+    }
+
+    IF_META_AVAILABLE(uint32_t, hotPixelMode, CAM_INTF_META_HOTPIXEL_MODE, metadata) {
+        uint8_t fwk_hotPixelMode = (uint8_t) *hotPixelMode;
+        camMetadata.update(ANDROID_HOT_PIXEL_MODE, &fwk_hotPixelMode, 1);
+    }
+
+    IF_META_AVAILABLE(float, lensAperture, CAM_INTF_META_LENS_APERTURE, metadata) {
+        camMetadata.update(ANDROID_LENS_APERTURE , lensAperture, 1);
+    }
+
+    IF_META_AVAILABLE(float, filterDensity, CAM_INTF_META_LENS_FILTERDENSITY, metadata) {
+        camMetadata.update(ANDROID_LENS_FILTER_DENSITY , filterDensity, 1);
+    }
+
+    IF_META_AVAILABLE(float, focalLength, CAM_INTF_META_LENS_FOCAL_LENGTH, metadata) {
+        camMetadata.update(ANDROID_LENS_FOCAL_LENGTH, focalLength, 1);
+    }
+
+    IF_META_AVAILABLE(uint32_t, opticalStab, CAM_INTF_META_LENS_OPT_STAB_MODE, metadata) {
+        uint8_t fwk_opticalStab = (uint8_t) *opticalStab;
+        camMetadata.update(ANDROID_LENS_OPTICAL_STABILIZATION_MODE, &fwk_opticalStab, 1);
+    }
+
+    IF_META_AVAILABLE(uint32_t, videoStab, CAM_INTF_META_VIDEO_STAB_MODE, metadata) {
+        uint8_t fwk_videoStab = (uint8_t) *videoStab;
+        camMetadata.update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, &fwk_videoStab, 1);
+    }
+
+    IF_META_AVAILABLE(uint32_t, noiseRedMode, CAM_INTF_META_NOISE_REDUCTION_MODE, metadata) {
+        uint8_t fwk_noiseRedMode = (uint8_t) *noiseRedMode;
+        camMetadata.update(ANDROID_NOISE_REDUCTION_MODE, &fwk_noiseRedMode, 1);
+    }
+
+    IF_META_AVAILABLE(float, effectiveExposureFactor, CAM_INTF_META_EFFECTIVE_EXPOSURE_FACTOR, metadata) {
+        camMetadata.update(ANDROID_REPROCESS_EFFECTIVE_EXPOSURE_FACTOR, effectiveExposureFactor, 1);
+    }
+
+    IF_META_AVAILABLE(cam_black_level_metadata_t, blackLevelSourcePattern,
+        CAM_INTF_META_BLACK_LEVEL_SOURCE_PATTERN, metadata) {
+
+        CDBG("%s: dynamicblackLevel = %f %f %f %f", __func__,
+          blackLevelSourcePattern->cam_black_level[0],
+          blackLevelSourcePattern->cam_black_level[1],
+          blackLevelSourcePattern->cam_black_level[2],
+          blackLevelSourcePattern->cam_black_level[3]);
+    }
+
+    IF_META_AVAILABLE(cam_black_level_metadata_t, blackLevelAppliedPattern,
+        CAM_INTF_META_BLACK_LEVEL_APPLIED_PATTERN, metadata) {
+        float fwk_blackLevelInd[4];
+
+        fwk_blackLevelInd[0] = blackLevelAppliedPattern->cam_black_level[0];
+        fwk_blackLevelInd[1] = blackLevelAppliedPattern->cam_black_level[1];
+        fwk_blackLevelInd[2] = blackLevelAppliedPattern->cam_black_level[2];
+        fwk_blackLevelInd[3] = blackLevelAppliedPattern->cam_black_level[3];
+
+        CDBG("%s: applied dynamicblackLevel = %f %f %f %f", __func__,
+          blackLevelAppliedPattern->cam_black_level[0],
+          blackLevelAppliedPattern->cam_black_level[1],
+          blackLevelAppliedPattern->cam_black_level[2],
+          blackLevelAppliedPattern->cam_black_level[3]);
+        camMetadata.update(QCAMERA3_SENSOR_DYNAMIC_BLACK_LEVEL_PATTERN, fwk_blackLevelInd, 4);
+        camMetadata.update(NEXUS_EXPERIMENTAL_2015_SENSOR_DYNAMIC_BLACK_LEVEL, fwk_blackLevelInd, 4);
+
+        // Update ANDROID_SENSOR_DYNAMIC_BLACK_LEVEL: convert the internal
+        // 16-bit depth to the sensor's 10-bit raw depth space (a factor of
+        // 2^(16-10) = 64).
+        fwk_blackLevelInd[0] /= 64.0;
+        fwk_blackLevelInd[1] /= 64.0;
+        fwk_blackLevelInd[2] /= 64.0;
+        fwk_blackLevelInd[3] /= 64.0;
+        camMetadata.update(ANDROID_SENSOR_DYNAMIC_BLACK_LEVEL, fwk_blackLevelInd, 4);
+    }
+
+    // A fixed white level is used by the ISP/sensor
+    camMetadata.update(ANDROID_SENSOR_DYNAMIC_WHITE_LEVEL,
+            &gCamCapability[mCameraId]->white_level, 1);
+
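+    // Optically shielded (black) regions are reported as four int32 values
+    // per region, copied from the sensor capability table.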
+    if (gCamCapability[mCameraId]->optical_black_region_count != 0 &&
+        gCamCapability[mCameraId]->optical_black_region_count <= MAX_OPTICAL_BLACK_REGIONS) {
+        int32_t opticalBlackRegions[MAX_OPTICAL_BLACK_REGIONS * 4];
+        for (size_t i = 0; i < gCamCapability[mCameraId]->optical_black_region_count * 4; i++) {
+            opticalBlackRegions[i] = gCamCapability[mCameraId]->optical_black_regions[i];
+        }
+        camMetadata.update(NEXUS_EXPERIMENTAL_2015_SENSOR_INFO_OPTICALLY_SHIELDED_REGIONS,
+                opticalBlackRegions, gCamCapability[mCameraId]->optical_black_region_count * 4);
+    }
+
+    IF_META_AVAILABLE(cam_crop_region_t, hScalerCropRegion,
+            CAM_INTF_META_SCALER_CROP_REGION, metadata) {
+        int32_t scalerCropRegion[4];
+        scalerCropRegion[0] = hScalerCropRegion->left;
+        scalerCropRegion[1] = hScalerCropRegion->top;
+        scalerCropRegion[2] = hScalerCropRegion->width;
+        scalerCropRegion[3] = hScalerCropRegion->height;
+
+        // Adjust crop region from sensor output coordinate system to active
+        // array coordinate system.
+        mCropRegionMapper.toActiveArray(scalerCropRegion[0], scalerCropRegion[1],
+                scalerCropRegion[2], scalerCropRegion[3]);
+
+        camMetadata.update(ANDROID_SCALER_CROP_REGION, scalerCropRegion, 4);
+    }
+
+    IF_META_AVAILABLE(int64_t, sensorExpTime, CAM_INTF_META_SENSOR_EXPOSURE_TIME, metadata) {
+        CDBG("%s: sensorExpTime = %lld", __func__, *sensorExpTime);
+        camMetadata.update(ANDROID_SENSOR_EXPOSURE_TIME , sensorExpTime, 1);
+    }
+
+    IF_META_AVAILABLE(int64_t, sensorFameDuration,
+            CAM_INTF_META_SENSOR_FRAME_DURATION, metadata) {
+        CDBG("%s: sensorFameDuration = %lld", __func__, *sensorFameDuration);
+        camMetadata.update(ANDROID_SENSOR_FRAME_DURATION, sensorFameDuration, 1);
+    }
+
+    IF_META_AVAILABLE(int64_t, sensorRollingShutterSkew,
+            CAM_INTF_META_SENSOR_ROLLING_SHUTTER_SKEW, metadata) {
+        CDBG("%s: sensorRollingShutterSkew = %lld", __func__, *sensorRollingShutterSkew);
+        camMetadata.update(ANDROID_SENSOR_ROLLING_SHUTTER_SKEW,
+                sensorRollingShutterSkew, 1);
+    }
+
+    IF_META_AVAILABLE(int32_t, sensorSensitivity, CAM_INTF_META_SENSOR_SENSITIVITY, metadata) {
+        CDBG("%s: sensorSensitivity = %d", __func__, *sensorSensitivity);
+        camMetadata.update(ANDROID_SENSOR_SENSITIVITY, sensorSensitivity, 1);
+
+        //calculate the noise profile based on sensitivity
+        double noise_profile_S = computeNoiseModelEntryS(*sensorSensitivity);
+        double noise_profile_O = computeNoiseModelEntryO(*sensorSensitivity);
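+        // ANDROID_SENSOR_NOISE_PROFILE expects one (S, O) coefficient pair
+        // per color channel; the same pair is replicated for every channel here.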
+        double noise_profile[2 * gCamCapability[mCameraId]->num_color_channels];
+        for (int i = 0; i < 2 * gCamCapability[mCameraId]->num_color_channels; i += 2) {
+            noise_profile[i]   = noise_profile_S;
+            noise_profile[i+1] = noise_profile_O;
+        }
+        CDBG("%s: noise model entry (S, O) is (%f, %f)", __func__,
+                noise_profile_S, noise_profile_O);
+        camMetadata.update(ANDROID_SENSOR_NOISE_PROFILE, noise_profile,
+                (size_t) (2 * gCamCapability[mCameraId]->num_color_channels));
+    }
+
+    IF_META_AVAILABLE(uint32_t, shadingMode, CAM_INTF_META_SHADING_MODE, metadata) {
+        uint8_t fwk_shadingMode = (uint8_t) *shadingMode;
+        camMetadata.update(ANDROID_SHADING_MODE, &fwk_shadingMode, 1);
+    }
+
+    IF_META_AVAILABLE(uint32_t, faceDetectMode, CAM_INTF_META_STATS_FACEDETECT_MODE, metadata) {
+        int val = lookupFwkName(FACEDETECT_MODES_MAP, METADATA_MAP_SIZE(FACEDETECT_MODES_MAP),
+                *faceDetectMode);
+        if (NAME_NOT_FOUND != val) {
+            uint8_t fwk_faceDetectMode = (uint8_t)val;
+            camMetadata.update(ANDROID_STATISTICS_FACE_DETECT_MODE, &fwk_faceDetectMode, 1);
+
+            if (fwk_faceDetectMode != ANDROID_STATISTICS_FACE_DETECT_MODE_OFF) {
+                IF_META_AVAILABLE(cam_face_detection_data_t, faceDetectionInfo,
+                        CAM_INTF_META_FACE_DETECTION, metadata) {
+                    uint8_t numFaces = MIN(
+                            faceDetectionInfo->num_faces_detected, MAX_ROI);
+                    int32_t faceIds[MAX_ROI];
+                    uint8_t faceScores[MAX_ROI];
+                    int32_t faceRectangles[MAX_ROI * 4];
+                    int32_t faceLandmarks[MAX_ROI * 6];
+                    size_t j = 0, k = 0;
+
+                    for (size_t i = 0; i < numFaces; i++) {
+                        faceScores[i] = (uint8_t)faceDetectionInfo->faces[i].score;
+                        // Adjust crop region from sensor output coordinate system to active
+                        // array coordinate system.
+                        cam_rect_t& rect = faceDetectionInfo->faces[i].face_boundary;
+                        mCropRegionMapper.toActiveArray(rect.left, rect.top,
+                                rect.width, rect.height);
+
+                        convertToRegions(faceDetectionInfo->faces[i].face_boundary,
+                                faceRectangles+j, -1);
+
+                        // Map the landmark coordinates from the sensor output
+                        // coordinate system to the active array coordinate system.
+                        cam_face_detection_info_t& face = faceDetectionInfo->faces[i];
+                        mCropRegionMapper.toActiveArray(face.left_eye_center.x,
+                                face.left_eye_center.y);
+                        mCropRegionMapper.toActiveArray(face.right_eye_center.x,
+                                face.right_eye_center.y);
+                        mCropRegionMapper.toActiveArray(face.mouth_center.x,
+                                face.mouth_center.y);
+
+                        convertLandmarks(faceDetectionInfo->faces[i], faceLandmarks+k);
+                        j+= 4;
+                        k+= 6;
+                    }
+                    if (numFaces <= 0) {
+                        memset(faceIds, 0, sizeof(int32_t) * MAX_ROI);
+                        memset(faceScores, 0, sizeof(uint8_t) * MAX_ROI);
+                        memset(faceRectangles, 0, sizeof(int32_t) * MAX_ROI * 4);
+                        memset(faceLandmarks, 0, sizeof(int32_t) * MAX_ROI * 6);
+                    }
+
+                    camMetadata.update(ANDROID_STATISTICS_FACE_SCORES, faceScores,
+                            numFaces);
+                    camMetadata.update(ANDROID_STATISTICS_FACE_RECTANGLES,
+                            faceRectangles, numFaces * 4U);
+                    if (fwk_faceDetectMode ==
+                            ANDROID_STATISTICS_FACE_DETECT_MODE_FULL) {
+                        camMetadata.update(ANDROID_STATISTICS_FACE_IDS, faceIds, numFaces);
+                        camMetadata.update(ANDROID_STATISTICS_FACE_LANDMARKS,
+                                faceLandmarks, numFaces * 6U);
+                   }
+                }
+            }
+        }
+    }
+
+    IF_META_AVAILABLE(uint32_t, histogramMode, CAM_INTF_META_STATS_HISTOGRAM_MODE, metadata) {
+        uint8_t fwk_histogramMode = (uint8_t) *histogramMode;
+        camMetadata.update(ANDROID_STATISTICS_HISTOGRAM_MODE, &fwk_histogramMode, 1);
+    }
+
+    IF_META_AVAILABLE(uint32_t, sharpnessMapMode,
+            CAM_INTF_META_STATS_SHARPNESS_MAP_MODE, metadata) {
+        uint8_t fwk_sharpnessMapMode = (uint8_t) *sharpnessMapMode;
+        camMetadata.update(ANDROID_STATISTICS_SHARPNESS_MAP_MODE, &fwk_sharpnessMapMode, 1);
+    }
+
+    IF_META_AVAILABLE(cam_sharpness_map_t, sharpnessMap,
+            CAM_INTF_META_STATS_SHARPNESS_MAP, metadata) {
+        camMetadata.update(ANDROID_STATISTICS_SHARPNESS_MAP, (int32_t *)sharpnessMap->sharpness,
+                CAM_MAX_MAP_WIDTH * CAM_MAX_MAP_HEIGHT * 3);
+    }
+
+    IF_META_AVAILABLE(cam_lens_shading_map_t, lensShadingMap,
+            CAM_INTF_META_LENS_SHADING_MAP, metadata) {
+        size_t map_height = MIN((size_t)gCamCapability[mCameraId]->lens_shading_map_size.height,
+                CAM_MAX_SHADING_MAP_HEIGHT);
+        size_t map_width = MIN((size_t)gCamCapability[mCameraId]->lens_shading_map_size.width,
+                CAM_MAX_SHADING_MAP_WIDTH);
+        camMetadata.update(ANDROID_STATISTICS_LENS_SHADING_MAP,
+                lensShadingMap->lens_shading, 4U * map_width * map_height);
+    }
+
+    IF_META_AVAILABLE(uint32_t, toneMapMode, CAM_INTF_META_TONEMAP_MODE, metadata) {
+        uint8_t fwk_toneMapMode = (uint8_t) *toneMapMode;
+        camMetadata.update(ANDROID_TONEMAP_MODE, &fwk_toneMapMode, 1);
+    }
+
+    IF_META_AVAILABLE(cam_rgb_tonemap_curves, tonemap, CAM_INTF_META_TONEMAP_CURVES, metadata) {
+        //Populate CAM_INTF_META_TONEMAP_CURVES
+        /* ch0 = G, ch 1 = B, ch 2 = R*/
+        if (tonemap->tonemap_points_cnt > CAM_MAX_TONEMAP_CURVE_SIZE) {
+            ALOGE("%s: Fatal: tonemap_points_cnt %d exceeds max value of %d",
+                    __func__, tonemap->tonemap_points_cnt,
+                    CAM_MAX_TONEMAP_CURVE_SIZE);
+            tonemap->tonemap_points_cnt = CAM_MAX_TONEMAP_CURVE_SIZE;
+        }
+
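+        /* Each tonemap point is an (input, output) pair, hence the
+         * tonemap_points_cnt * 2 element count per channel. */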
+        camMetadata.update(ANDROID_TONEMAP_CURVE_GREEN,
+                        &tonemap->curves[0].tonemap_points[0][0],
+                        tonemap->tonemap_points_cnt * 2);
+
+        camMetadata.update(ANDROID_TONEMAP_CURVE_BLUE,
+                        &tonemap->curves[1].tonemap_points[0][0],
+                        tonemap->tonemap_points_cnt * 2);
+
+        camMetadata.update(ANDROID_TONEMAP_CURVE_RED,
+                        &tonemap->curves[2].tonemap_points[0][0],
+                        tonemap->tonemap_points_cnt * 2);
+    }
+
+    IF_META_AVAILABLE(cam_color_correct_gains_t, colorCorrectionGains,
+            CAM_INTF_META_COLOR_CORRECT_GAINS, metadata) {
+        camMetadata.update(ANDROID_COLOR_CORRECTION_GAINS, colorCorrectionGains->gains,
+                CC_GAINS_COUNT);
+    }
+
+    IF_META_AVAILABLE(cam_color_correct_matrix_t, colorCorrectionMatrix,
+            CAM_INTF_META_COLOR_CORRECT_TRANSFORM, metadata) {
+        camMetadata.update(ANDROID_COLOR_CORRECTION_TRANSFORM,
+                (camera_metadata_rational_t *)(void *)colorCorrectionMatrix->transform_matrix,
+                CC_MATRIX_COLS * CC_MATRIX_ROWS);
+    }
+
+    IF_META_AVAILABLE(cam_profile_tone_curve, toneCurve,
+            CAM_INTF_META_PROFILE_TONE_CURVE, metadata) {
+        if (toneCurve->tonemap_points_cnt > CAM_MAX_TONEMAP_CURVE_SIZE) {
+            ALOGE("%s: Fatal: tonemap_points_cnt %d exceeds max value of %d",
+                    __func__, toneCurve->tonemap_points_cnt,
+                    CAM_MAX_TONEMAP_CURVE_SIZE);
+            toneCurve->tonemap_points_cnt = CAM_MAX_TONEMAP_CURVE_SIZE;
+        }
+        camMetadata.update(ANDROID_SENSOR_PROFILE_TONE_CURVE,
+                (float*)toneCurve->curve.tonemap_points,
+                toneCurve->tonemap_points_cnt * 2);
+    }
+
+    IF_META_AVAILABLE(cam_color_correct_gains_t, predColorCorrectionGains,
+            CAM_INTF_META_PRED_COLOR_CORRECT_GAINS, metadata) {
+        camMetadata.update(ANDROID_STATISTICS_PREDICTED_COLOR_GAINS,
+                predColorCorrectionGains->gains, 4);
+    }
+
+    IF_META_AVAILABLE(cam_color_correct_matrix_t, predColorCorrectionMatrix,
+            CAM_INTF_META_PRED_COLOR_CORRECT_TRANSFORM, metadata) {
+        camMetadata.update(ANDROID_STATISTICS_PREDICTED_COLOR_TRANSFORM,
+                (camera_metadata_rational_t *)(void *)predColorCorrectionMatrix->transform_matrix,
+                CC_MATRIX_ROWS * CC_MATRIX_COLS);
+    }
+
+    IF_META_AVAILABLE(float, otpWbGrGb, CAM_INTF_META_OTP_WB_GRGB, metadata) {
+        camMetadata.update(ANDROID_SENSOR_GREEN_SPLIT, otpWbGrGb, 1);
+    }
+
+    IF_META_AVAILABLE(uint32_t, blackLevelLock, CAM_INTF_META_BLACK_LEVEL_LOCK, metadata) {
+        uint8_t fwk_blackLevelLock = (uint8_t) *blackLevelLock;
+        camMetadata.update(ANDROID_BLACK_LEVEL_LOCK, &fwk_blackLevelLock, 1);
+    }
+
+    IF_META_AVAILABLE(uint32_t, sceneFlicker, CAM_INTF_META_SCENE_FLICKER, metadata) {
+        uint8_t fwk_sceneFlicker = (uint8_t) *sceneFlicker;
+        camMetadata.update(ANDROID_STATISTICS_SCENE_FLICKER, &fwk_sceneFlicker, 1);
+    }
+
+    IF_META_AVAILABLE(uint32_t, effectMode, CAM_INTF_PARM_EFFECT, metadata) {
+        int val = lookupFwkName(EFFECT_MODES_MAP, METADATA_MAP_SIZE(EFFECT_MODES_MAP),
+                *effectMode);
+        if (NAME_NOT_FOUND != val) {
+            uint8_t fwk_effectMode = (uint8_t)val;
+            camMetadata.update(ANDROID_CONTROL_EFFECT_MODE, &fwk_effectMode, 1);
+        }
+    }
+
+    IF_META_AVAILABLE(cam_test_pattern_data_t, testPatternData,
+            CAM_INTF_META_TEST_PATTERN_DATA, metadata) {
+        int32_t fwk_testPatternMode = lookupFwkName(TEST_PATTERN_MAP,
+                METADATA_MAP_SIZE(TEST_PATTERN_MAP), testPatternData->mode);
+        if (NAME_NOT_FOUND != fwk_testPatternMode) {
+            camMetadata.update(ANDROID_SENSOR_TEST_PATTERN_MODE, &fwk_testPatternMode, 1);
+        }
+        int32_t fwk_testPatternData[4];
+        fwk_testPatternData[0] = testPatternData->r;
+        fwk_testPatternData[3] = testPatternData->b;
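+        // Slots 1 and 2 carry the two green channels; which of Gr/Gb goes
+        // into which slot depends on the sensor's Bayer color arrangement.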
+        switch (gCamCapability[mCameraId]->color_arrangement) {
+        case CAM_FILTER_ARRANGEMENT_RGGB:
+        case CAM_FILTER_ARRANGEMENT_GRBG:
+            fwk_testPatternData[1] = testPatternData->gr;
+            fwk_testPatternData[2] = testPatternData->gb;
+            break;
+        case CAM_FILTER_ARRANGEMENT_GBRG:
+        case CAM_FILTER_ARRANGEMENT_BGGR:
+            fwk_testPatternData[2] = testPatternData->gr;
+            fwk_testPatternData[1] = testPatternData->gb;
+            break;
+        default:
+            ALOGE("%s: color arrangement %d is not supported", __func__,
+                gCamCapability[mCameraId]->color_arrangement);
+            break;
+        }
+        camMetadata.update(ANDROID_SENSOR_TEST_PATTERN_DATA, fwk_testPatternData, 4);
+    }
+
+    IF_META_AVAILABLE(double, gps_coords, CAM_INTF_META_JPEG_GPS_COORDINATES, metadata) {
+        camMetadata.update(ANDROID_JPEG_GPS_COORDINATES, gps_coords, 3);
+    }
+
+    IF_META_AVAILABLE(uint8_t, gps_methods, CAM_INTF_META_JPEG_GPS_PROC_METHODS, metadata) {
+        String8 str((const char *)gps_methods);
+        camMetadata.update(ANDROID_JPEG_GPS_PROCESSING_METHOD, str);
+    }
+
+    IF_META_AVAILABLE(int64_t, gps_timestamp, CAM_INTF_META_JPEG_GPS_TIMESTAMP, metadata) {
+        camMetadata.update(ANDROID_JPEG_GPS_TIMESTAMP, gps_timestamp, 1);
+    }
+
+    IF_META_AVAILABLE(int32_t, jpeg_orientation, CAM_INTF_META_JPEG_ORIENTATION, metadata) {
+        camMetadata.update(ANDROID_JPEG_ORIENTATION, jpeg_orientation, 1);
+    }
+
+    IF_META_AVAILABLE(uint32_t, jpeg_quality, CAM_INTF_META_JPEG_QUALITY, metadata) {
+        uint8_t fwk_jpeg_quality = (uint8_t) *jpeg_quality;
+        camMetadata.update(ANDROID_JPEG_QUALITY, &fwk_jpeg_quality, 1);
+    }
+
+    IF_META_AVAILABLE(uint32_t, thumb_quality, CAM_INTF_META_JPEG_THUMB_QUALITY, metadata) {
+        uint8_t fwk_thumb_quality = (uint8_t) *thumb_quality;
+        camMetadata.update(ANDROID_JPEG_THUMBNAIL_QUALITY, &fwk_thumb_quality, 1);
+    }
+
+    IF_META_AVAILABLE(cam_dimension_t, thumb_size, CAM_INTF_META_JPEG_THUMB_SIZE, metadata) {
+        int32_t fwk_thumb_size[2];
+        fwk_thumb_size[0] = thumb_size->width;
+        fwk_thumb_size[1] = thumb_size->height;
+        camMetadata.update(ANDROID_JPEG_THUMBNAIL_SIZE, fwk_thumb_size, 2);
+    }
+
+    IF_META_AVAILABLE(int32_t, privateData, CAM_INTF_META_PRIVATE_DATA, metadata) {
+        camMetadata.update(QCAMERA3_PRIVATEDATA_REPROCESS,
+                privateData,
+                MAX_METADATA_PRIVATE_PAYLOAD_SIZE_IN_BYTES / sizeof(int32_t));
+    }
+
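+    // Pack tuning data into a flat blob: a six-word header (data version plus
+    // the sensor/VFE/CPP/CAC/mod3 section sizes) followed by the variable-length
+    // data sections, published through the QCAMERA3_TUNING_META_DATA_BLOB tag.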
+    if (metadata->is_tuning_params_valid) {
+        uint8_t tuning_meta_data_blob[sizeof(tuning_params_t)];
+        uint8_t *data = (uint8_t *)&tuning_meta_data_blob[0];
+        metadata->tuning_params.tuning_data_version = TUNING_DATA_VERSION;
+
+
+        memcpy(data, ((uint8_t *)&metadata->tuning_params.tuning_data_version),
+                sizeof(uint32_t));
+        data += sizeof(uint32_t);
+
+        memcpy(data, ((uint8_t *)&metadata->tuning_params.tuning_sensor_data_size),
+                sizeof(uint32_t));
+        CDBG("tuning_sensor_data_size %d",(int)(*(int *)data));
+        data += sizeof(uint32_t);
+
+        memcpy(data, ((uint8_t *)&metadata->tuning_params.tuning_vfe_data_size),
+                sizeof(uint32_t));
+        CDBG("tuning_vfe_data_size %d",(int)(*(int *)data));
+        data += sizeof(uint32_t);
+
+        memcpy(data, ((uint8_t *)&metadata->tuning_params.tuning_cpp_data_size),
+                sizeof(uint32_t));
+        CDBG("tuning_cpp_data_size %d",(int)(*(int *)data));
+        data += sizeof(uint32_t);
+
+        memcpy(data, ((uint8_t *)&metadata->tuning_params.tuning_cac_data_size),
+                sizeof(uint32_t));
+        CDBG("tuning_cac_data_size %d",(int)(*(int *)data));
+        data += sizeof(uint32_t);
+
+        metadata->tuning_params.tuning_mod3_data_size = 0;
+        memcpy(data, ((uint8_t *)&metadata->tuning_params.tuning_mod3_data_size),
+                sizeof(uint32_t));
+        CDBG("tuning_mod3_data_size %d",(int)(*(int *)data));
+        data += sizeof(uint32_t);
+
+        size_t count = MIN(metadata->tuning_params.tuning_sensor_data_size,
+                TUNING_SENSOR_DATA_MAX);
+        memcpy(data, ((uint8_t *)&metadata->tuning_params.data),
+                count);
+        data += count;
+
+        count = MIN(metadata->tuning_params.tuning_vfe_data_size,
+                TUNING_VFE_DATA_MAX);
+        memcpy(data, ((uint8_t *)&metadata->tuning_params.data[TUNING_VFE_DATA_OFFSET]),
+                count);
+        data += count;
+
+        count = MIN(metadata->tuning_params.tuning_cpp_data_size,
+                TUNING_CPP_DATA_MAX);
+        memcpy(data, ((uint8_t *)&metadata->tuning_params.data[TUNING_CPP_DATA_OFFSET]),
+                count);
+        data += count;
+
+        count = MIN(metadata->tuning_params.tuning_cac_data_size,
+                TUNING_CAC_DATA_MAX);
+        memcpy(data, ((uint8_t *)&metadata->tuning_params.data[TUNING_CAC_DATA_OFFSET]),
+                count);
+        data += count;
+
+        camMetadata.update(QCAMERA3_TUNING_META_DATA_BLOB,
+                (int32_t *)(void *)tuning_meta_data_blob,
+                (size_t)(data-tuning_meta_data_blob) / sizeof(uint32_t));
+    }
+
+    IF_META_AVAILABLE(cam_neutral_col_point_t, neuColPoint,
+            CAM_INTF_META_NEUTRAL_COL_POINT, metadata) {
+        camMetadata.update(ANDROID_SENSOR_NEUTRAL_COLOR_POINT,
+                (camera_metadata_rational_t *)(void *)neuColPoint->neutral_col_point,
+                NEUTRAL_COL_POINTS);
+    }
+
+    IF_META_AVAILABLE(uint32_t, shadingMapMode, CAM_INTF_META_LENS_SHADING_MAP_MODE, metadata) {
+        uint8_t fwk_shadingMapMode = (uint8_t) *shadingMapMode;
+        camMetadata.update(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE, &fwk_shadingMapMode, 1);
+    }
+
+    IF_META_AVAILABLE(cam_area_t, hAeRegions, CAM_INTF_META_AEC_ROI, metadata) {
+        int32_t aeRegions[REGIONS_TUPLE_COUNT];
+        // Adjust the AE region from the sensor output coordinate system to the
+        // active array coordinate system.
+        mCropRegionMapper.toActiveArray(hAeRegions->rect.left, hAeRegions->rect.top,
+                hAeRegions->rect.width, hAeRegions->rect.height);
+
+        convertToRegions(hAeRegions->rect, aeRegions, hAeRegions->weight);
+        camMetadata.update(ANDROID_CONTROL_AE_REGIONS, aeRegions,
+                REGIONS_TUPLE_COUNT);
+        CDBG("%s: Metadata : ANDROID_CONTROL_AE_REGIONS: FWK: [%d,%d,%d,%d] HAL: [%d,%d,%d,%d]",
+                __func__, aeRegions[0], aeRegions[1], aeRegions[2], aeRegions[3],
+                hAeRegions->rect.left, hAeRegions->rect.top, hAeRegions->rect.width,
+                hAeRegions->rect.height);
+    }
+
+    IF_META_AVAILABLE(uint32_t, afState, CAM_INTF_META_AF_STATE, metadata) {
+        uint8_t fwk_afState = (uint8_t) *afState;
+        camMetadata.update(ANDROID_CONTROL_AF_STATE, &fwk_afState, 1);
+        CDBG("%s: urgent Metadata : ANDROID_CONTROL_AF_STATE %u", __func__, *afState);
+    }
+
+    IF_META_AVAILABLE(float, focusDistance, CAM_INTF_META_LENS_FOCUS_DISTANCE, metadata) {
+        camMetadata.update(ANDROID_LENS_FOCUS_DISTANCE , focusDistance, 1);
+    }
+
+    IF_META_AVAILABLE(float, focusRange, CAM_INTF_META_LENS_FOCUS_RANGE, metadata) {
+        camMetadata.update(ANDROID_LENS_FOCUS_RANGE , focusRange, 2);
+    }
+
+    IF_META_AVAILABLE(cam_af_lens_state_t, lensState, CAM_INTF_META_LENS_STATE, metadata) {
+        uint8_t fwk_lensState = *lensState;
+        camMetadata.update(ANDROID_LENS_STATE , &fwk_lensState, 1);
+    }
+
+    IF_META_AVAILABLE(cam_area_t, hAfRegions, CAM_INTF_META_AF_ROI, metadata) {
+        /*af regions*/
+        int32_t afRegions[REGIONS_TUPLE_COUNT];
+        // Adjust the AF region from the sensor output coordinate system to the
+        // active array coordinate system.
+        mCropRegionMapper.toActiveArray(hAfRegions->rect.left, hAfRegions->rect.top,
+                hAfRegions->rect.width, hAfRegions->rect.height);
+
+        convertToRegions(hAfRegions->rect, afRegions, hAfRegions->weight);
+        camMetadata.update(ANDROID_CONTROL_AF_REGIONS, afRegions,
+                REGIONS_TUPLE_COUNT);
+        CDBG("%s: Metadata : ANDROID_CONTROL_AF_REGIONS: FWK: [%d,%d,%d,%d] HAL: [%d,%d,%d,%d]",
+                __func__, afRegions[0], afRegions[1], afRegions[2], afRegions[3],
+                hAfRegions->rect.left, hAfRegions->rect.top, hAfRegions->rect.width,
+                hAfRegions->rect.height);
+    }
+
+    IF_META_AVAILABLE(uint32_t, hal_ab_mode, CAM_INTF_PARM_ANTIBANDING, metadata) {
+        int val = lookupFwkName(ANTIBANDING_MODES_MAP, METADATA_MAP_SIZE(ANTIBANDING_MODES_MAP),
+                *hal_ab_mode);
+        if (NAME_NOT_FOUND != val) {
+            uint8_t fwk_ab_mode = (uint8_t)val;
+            camMetadata.update(ANDROID_CONTROL_AE_ANTIBANDING_MODE, &fwk_ab_mode, 1);
+        }
+    }
+
+    IF_META_AVAILABLE(uint32_t, bestshotMode, CAM_INTF_PARM_BESTSHOT_MODE, metadata) {
+        int val = lookupFwkName(SCENE_MODES_MAP,
+                METADATA_MAP_SIZE(SCENE_MODES_MAP), *bestshotMode);
+        if (NAME_NOT_FOUND != val) {
+            uint8_t fwkBestshotMode = (uint8_t)val;
+            camMetadata.update(ANDROID_CONTROL_SCENE_MODE, &fwkBestshotMode, 1);
+            CDBG("%s: Metadata : ANDROID_CONTROL_SCENE_MODE", __func__);
+        } else {
+            CDBG_HIGH("%s: Metadata not found : ANDROID_CONTROL_SCENE_MODE", __func__);
+        }
+    }
+
+    IF_META_AVAILABLE(uint32_t, mode, CAM_INTF_META_MODE, metadata) {
+         uint8_t fwk_mode = (uint8_t) *mode;
+         camMetadata.update(ANDROID_CONTROL_MODE, &fwk_mode, 1);
+    }
+
+    /* Constant metadata values to be updated */
+    uint8_t hotPixelModeFast = ANDROID_HOT_PIXEL_MODE_FAST;
+    camMetadata.update(ANDROID_HOT_PIXEL_MODE, &hotPixelModeFast, 1);
+
+    uint8_t hotPixelMapMode = ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF;
+    camMetadata.update(ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE, &hotPixelMapMode, 1);
+
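+    // With the hot pixel map mode fixed to OFF, an empty (zero-length) hot
+    // pixel map is reported to the framework.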
+    int32_t hotPixelMap[2];
+    camMetadata.update(ANDROID_STATISTICS_HOT_PIXEL_MAP, &hotPixelMap[0], 0);
+
+    // CDS
+    IF_META_AVAILABLE(int32_t, cds, CAM_INTF_PARM_CDS_MODE, metadata) {
+        camMetadata.update(QCAMERA3_CDS_MODE, cds, 1);
+    }
+
+    // TNR
+    IF_META_AVAILABLE(cam_denoise_param_t, tnr, CAM_INTF_PARM_TEMPORAL_DENOISE, metadata) {
+        uint8_t tnr_enable       = tnr->denoise_enable;
+        int32_t tnr_process_type = (int32_t)tnr->process_plates;
+
+        camMetadata.update(QCAMERA3_TEMPORAL_DENOISE_ENABLE, &tnr_enable, 1);
+        camMetadata.update(QCAMERA3_TEMPORAL_DENOISE_PROCESS_TYPE, &tnr_process_type, 1);
+    }
+
+    // Reprocess crop data
+    IF_META_AVAILABLE(cam_crop_data_t, crop_data, CAM_INTF_META_CROP_DATA, metadata) {
+        uint8_t cnt = crop_data->num_of_streams;
+        if ( (0 >= cnt) || (cnt > MAX_NUM_STREAMS)) {
+            // mm-qcamera-daemon only posts crop_data for streams
+            // not linked to pproc, so the absence of valid crop
+            // metadata is not necessarily an error case.
+            CDBG("%s: No valid crop metadata entries", __func__);
+        } else {
+            uint32_t reproc_stream_id;
+            if ( NO_ERROR != getReprocessibleOutputStreamId(reproc_stream_id)) {
+                CDBG("%s: No reprocessible stream found, ignore crop data", __func__);
+            } else {
+                int rc = NO_ERROR;
+                Vector<int32_t> roi_map;
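+                // Four crop values (left, top, width, height) per stream entry.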
+                int32_t *crop = new int32_t[cnt*4];
+                if (NULL == crop) {
+                   rc = NO_MEMORY;
+                }
+                if (NO_ERROR == rc) {
+                    int32_t streams_found = 0;
+                    for (size_t i = 0; i < cnt; i++) {
+                        if (crop_data->crop_info[i].stream_id == reproc_stream_id) {
+                            if (pprocDone) {
+                                // HAL already does internal reprocessing,
+                                // either via reprocessing before JPEG encoding,
+                                // or offline postprocessing for pproc bypass case.
+                                crop[0] = 0;
+                                crop[1] = 0;
+                                crop[2] = mInputStreamInfo.dim.width;
+                                crop[3] = mInputStreamInfo.dim.height;
+                            } else {
+                                crop[0] = crop_data->crop_info[i].crop.left;
+                                crop[1] = crop_data->crop_info[i].crop.top;
+                                crop[2] = crop_data->crop_info[i].crop.width;
+                                crop[3] = crop_data->crop_info[i].crop.height;
+                            }
+                            roi_map.add(crop_data->crop_info[i].roi_map.left);
+                            roi_map.add(crop_data->crop_info[i].roi_map.top);
+                            roi_map.add(crop_data->crop_info[i].roi_map.width);
+                            roi_map.add(crop_data->crop_info[i].roi_map.height);
+                            streams_found++;
+                            CDBG("%s: Adding reprocess crop data for stream %dx%d, %dx%d",
+                                    __func__,
+                                    crop[0], crop[1], crop[2], crop[3]);
+                            CDBG("%s: Adding reprocess crop roi map for stream %dx%d, %dx%d",
+                                    __func__,
+                                    crop_data->crop_info[i].roi_map.left,
+                                    crop_data->crop_info[i].roi_map.top,
+                                    crop_data->crop_info[i].roi_map.width,
+                                    crop_data->crop_info[i].roi_map.height);
+                            break;
+
+                       }
+                    }
+                    camMetadata.update(QCAMERA3_CROP_COUNT_REPROCESS,
+                            &streams_found, 1);
+                    camMetadata.update(QCAMERA3_CROP_REPROCESS,
+                            crop, (size_t)(streams_found * 4));
+                    if (roi_map.array()) {
+                        camMetadata.update(QCAMERA3_CROP_ROI_MAP_REPROCESS,
+                                roi_map.array(), roi_map.size());
+                    }
+               }
+               if (crop) {
+                   delete [] crop;
+               }
+            }
+        }
+    }
+
+    IF_META_AVAILABLE(cam_aberration_mode_t, cacMode, CAM_INTF_PARM_CAC, metadata) {
+        int val = lookupFwkName(COLOR_ABERRATION_MAP, METADATA_MAP_SIZE(COLOR_ABERRATION_MAP),
+                *cacMode);
+        if (NAME_NOT_FOUND != val) {
+            uint8_t fwkCacMode = (uint8_t)val;
+            camMetadata.update(ANDROID_COLOR_CORRECTION_ABERRATION_MODE, &fwkCacMode, 1);
+        } else {
+            ALOGE("%s: Invalid CAC camera parameter: %d", __func__, *cacMode);
+        }
+    }
+
+    // Post blob of cam_cds_data through vendor tag.
+    IF_META_AVAILABLE(cam_cds_data_t, cdsInfo, CAM_INTF_META_CDS_DATA, metadata) {
+        uint8_t cnt = cdsInfo->num_of_streams;
+        cam_cds_data_t cdsDataOverride;
+        memset(&cdsDataOverride, 0, sizeof(cdsDataOverride));
+        cdsDataOverride.session_cds_enable = cdsInfo->session_cds_enable;
+        cdsDataOverride.num_of_streams = 1;
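+        // Collapse the per-stream CDS info into a single entry carrying only
+        // the CDS enable state of the reprocessible output stream.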
+        if ((0 < cnt) && (cnt <= MAX_NUM_STREAMS)) {
+            uint32_t reproc_stream_id;
+            if ( NO_ERROR != getReprocessibleOutputStreamId(reproc_stream_id)) {
+                CDBG("%s: No reprocessible stream found, ignore cds data", __func__);
+            } else {
+                for (size_t i = 0; i < cnt; i++) {
+                    if (cdsInfo->cds_info[i].stream_id ==
+                            reproc_stream_id) {
+                        cdsDataOverride.cds_info[0].cds_enable =
+                                cdsInfo->cds_info[i].cds_enable;
+                        break;
+                    }
+                }
+            }
+        } else {
+            CDBG("%s: Invalid stream count %d in CDS_DATA", __func__, cnt);
+        }
+        camMetadata.update(QCAMERA3_CDS_INFO,
+                (uint8_t *)&cdsDataOverride,
+                sizeof(cam_cds_data_t));
+    }
+
+    // Ldaf calibration data
+    if (!mLdafCalibExist) {
+        IF_META_AVAILABLE(uint32_t, ldafCalib,
+                CAM_INTF_META_LDAF_EXIF, metadata) {
+            mLdafCalibExist = true;
+            mLdafCalib[0] = ldafCalib[0];
+            mLdafCalib[1] = ldafCalib[1];
+            CDBG("%s: ldafCalib[0] is %d, ldafCalib[1] is %d", __func__,
+                    ldafCalib[0], ldafCalib[1]);
+        }
+    }
+
+    resultMetadata = camMetadata.release();
+    return resultMetadata;
+}
+
+/*===========================================================================
+ * FUNCTION   : saveExifParams
+ *
+ * DESCRIPTION: save the 3A EXIF debug parameters from the metadata callback
+ *
+ * PARAMETERS :
+ *   @metadata : metadata information from callback
+ *
+ * RETURN     : none
+ *
+ *==========================================================================*/
+void QCamera3HardwareInterface::saveExifParams(metadata_buffer_t *metadata)
+{
+    IF_META_AVAILABLE(cam_ae_exif_debug_t, ae_exif_debug_params,
+            CAM_INTF_META_EXIF_DEBUG_AE, metadata) {
+        mExifParams.ae_debug_params = *ae_exif_debug_params;
+        mExifParams.ae_debug_params_valid = TRUE;
+    }
+    IF_META_AVAILABLE(cam_awb_exif_debug_t,awb_exif_debug_params,
+            CAM_INTF_META_EXIF_DEBUG_AWB, metadata) {
+        mExifParams.awb_debug_params = *awb_exif_debug_params;
+        mExifParams.awb_debug_params_valid = TRUE;
+    }
+    IF_META_AVAILABLE(cam_af_exif_debug_t,af_exif_debug_params,
+            CAM_INTF_META_EXIF_DEBUG_AF, metadata) {
+        mExifParams.af_debug_params = *af_exif_debug_params;
+        mExifParams.af_debug_params_valid = TRUE;
+    }
+    IF_META_AVAILABLE(cam_asd_exif_debug_t, asd_exif_debug_params,
+            CAM_INTF_META_EXIF_DEBUG_ASD, metadata) {
+        mExifParams.asd_debug_params = *asd_exif_debug_params;
+        mExifParams.asd_debug_params_valid = TRUE;
+    }
+    IF_META_AVAILABLE(cam_stats_buffer_exif_debug_t,stats_exif_debug_params,
+            CAM_INTF_META_EXIF_DEBUG_STATS, metadata) {
+        mExifParams.stats_debug_params = *stats_exif_debug_params;
+        mExifParams.stats_debug_params_valid = TRUE;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : get3AExifParams
+ *
+ * DESCRIPTION: return the cached 3A EXIF parameters
+ *
+ * PARAMETERS : none
+ *
+ *
+ * RETURN     : mm_jpeg_exif_params_t
+ *
+ *==========================================================================*/
+mm_jpeg_exif_params_t QCamera3HardwareInterface::get3AExifParams()
+{
+    return mExifParams;
+}
+
+/*===========================================================================
+ * FUNCTION   : translateCbUrgentMetadataToResultMetadata
+ *
+ * DESCRIPTION: translate urgent metadata received in the callback into result
+ *              metadata in the format expected by the framework
+ *
+ * PARAMETERS :
+ *   @metadata : metadata information from callback
+ *
+ * RETURN     : camera_metadata_t*
+ *              metadata in a format specified by fwk
+ *==========================================================================*/
+camera_metadata_t*
+QCamera3HardwareInterface::translateCbUrgentMetadataToResultMetadata
+                                (metadata_buffer_t *metadata)
+{
+    CameraMetadata camMetadata;
+    camera_metadata_t *resultMetadata;
+
+
+    IF_META_AVAILABLE(uint32_t, whiteBalanceState, CAM_INTF_META_AWB_STATE, metadata) {
+        uint8_t fwk_whiteBalanceState = (uint8_t) *whiteBalanceState;
+        camMetadata.update(ANDROID_CONTROL_AWB_STATE, &fwk_whiteBalanceState, 1);
+        CDBG("%s: urgent Metadata : ANDROID_CONTROL_AWB_STATE %u", __func__, *whiteBalanceState);
+    }
+
+    IF_META_AVAILABLE(cam_trigger_t, aecTrigger, CAM_INTF_META_AEC_PRECAPTURE_TRIGGER, metadata) {
+        camMetadata.update(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
+                &aecTrigger->trigger, 1);
+        camMetadata.update(ANDROID_CONTROL_AE_PRECAPTURE_ID,
+                &aecTrigger->trigger_id, 1);
+        CDBG("%s: urgent Metadata : CAM_INTF_META_AEC_PRECAPTURE_TRIGGER: %d",
+                __func__, aecTrigger->trigger);
+        CDBG("%s: urgent Metadata : ANDROID_CONTROL_AE_PRECAPTURE_ID: %d", __func__,
+                aecTrigger->trigger_id);
+    }
+
+    IF_META_AVAILABLE(uint32_t, ae_state, CAM_INTF_META_AEC_STATE, metadata) {
+        uint8_t fwk_ae_state = (uint8_t) *ae_state;
+        camMetadata.update(ANDROID_CONTROL_AE_STATE, &fwk_ae_state, 1);
+        CDBG("%s: urgent Metadata : ANDROID_CONTROL_AE_STATE %u", __func__, *ae_state);
+    }
+
+    IF_META_AVAILABLE(uint32_t, focusMode, CAM_INTF_PARM_FOCUS_MODE, metadata) {
+        int val = lookupFwkName(FOCUS_MODES_MAP, METADATA_MAP_SIZE(FOCUS_MODES_MAP), *focusMode);
+        if (NAME_NOT_FOUND != val) {
+            uint8_t fwkAfMode = (uint8_t)val;
+            camMetadata.update(ANDROID_CONTROL_AF_MODE, &fwkAfMode, 1);
+            CDBG("%s: urgent Metadata : ANDROID_CONTROL_AF_MODE", __func__);
+        } else {
+            CDBG_HIGH("%s: urgent Metadata not found : ANDROID_CONTROL_AF_MODE %d", __func__,
+                    val);
+        }
+    }
+
+    IF_META_AVAILABLE(cam_trigger_t, af_trigger, CAM_INTF_META_AF_TRIGGER, metadata) {
+        camMetadata.update(ANDROID_CONTROL_AF_TRIGGER,
+                &af_trigger->trigger, 1);
+        CDBG("%s: urgent Metadata : CAM_INTF_META_AF_TRIGGER = %d",
+                __func__, af_trigger->trigger);
+        camMetadata.update(ANDROID_CONTROL_AF_TRIGGER_ID, &af_trigger->trigger_id, 1);
+        CDBG("%s: urgent Metadata : ANDROID_CONTROL_AF_TRIGGER_ID = %d", __func__,
+                af_trigger->trigger_id);
+    }
+
+    IF_META_AVAILABLE(int32_t, whiteBalance, CAM_INTF_PARM_WHITE_BALANCE, metadata) {
+        int val = lookupFwkName(WHITE_BALANCE_MODES_MAP,
+                METADATA_MAP_SIZE(WHITE_BALANCE_MODES_MAP), *whiteBalance);
+        if (NAME_NOT_FOUND != val) {
+            uint8_t fwkWhiteBalanceMode = (uint8_t)val;
+            camMetadata.update(ANDROID_CONTROL_AWB_MODE, &fwkWhiteBalanceMode, 1);
+            CDBG("%s: urgent Metadata : ANDROID_CONTROL_AWB_MODE %d", __func__, val);
+        } else {
+            CDBG_HIGH("%s: urgent Metadata not found : ANDROID_CONTROL_AWB_MODE", __func__);
+        }
+    }
+
+    uint8_t fwk_aeMode = ANDROID_CONTROL_AE_MODE_OFF;
+    uint32_t aeMode = CAM_AE_MODE_MAX;
+    int32_t flashMode = CAM_FLASH_MODE_MAX;
+    int32_t redeye = -1;
+    IF_META_AVAILABLE(uint32_t, pAeMode, CAM_INTF_META_AEC_MODE, metadata) {
+        aeMode = *pAeMode;
+    }
+    IF_META_AVAILABLE(int32_t, pFlashMode, CAM_INTF_PARM_LED_MODE, metadata) {
+        flashMode = *pFlashMode;
+    }
+    IF_META_AVAILABLE(int32_t, pRedeye, CAM_INTF_PARM_REDEYE_REDUCTION, metadata) {
+        redeye = *pRedeye;
+    }
+
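+    // Derive the framework AE mode from the combination of redeye reduction,
+    // LED/flash mode and the HAL AE mode gathered above.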
+    if (1 == redeye) {
+        fwk_aeMode = ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE;
+        camMetadata.update(ANDROID_CONTROL_AE_MODE, &fwk_aeMode, 1);
+    } else if ((CAM_FLASH_MODE_AUTO == flashMode) || (CAM_FLASH_MODE_ON == flashMode)) {
+        int val = lookupFwkName(AE_FLASH_MODE_MAP, METADATA_MAP_SIZE(AE_FLASH_MODE_MAP),
+                flashMode);
+        if (NAME_NOT_FOUND != val) {
+            fwk_aeMode = (uint8_t)val;
+            camMetadata.update(ANDROID_CONTROL_AE_MODE, &fwk_aeMode, 1);
+        } else {
+            ALOGE("%s: Unsupported flash mode %d", __func__, flashMode);
+        }
+    } else if (aeMode == CAM_AE_MODE_ON) {
+        fwk_aeMode = ANDROID_CONTROL_AE_MODE_ON;
+        camMetadata.update(ANDROID_CONTROL_AE_MODE, &fwk_aeMode, 1);
+    } else if (aeMode == CAM_AE_MODE_OFF) {
+        fwk_aeMode = ANDROID_CONTROL_AE_MODE_OFF;
+        camMetadata.update(ANDROID_CONTROL_AE_MODE, &fwk_aeMode, 1);
+    } else {
+        ALOGE("%s: Not enough info to deduce ANDROID_CONTROL_AE_MODE redeye:%d, "
+              "flashMode:%d, aeMode:%u!!!",
+                __func__, redeye, flashMode, aeMode);
+    }
+
+    resultMetadata = camMetadata.release();
+    return resultMetadata;
+}
+
+/*===========================================================================
+ * FUNCTION   : dumpMetadataToFile
+ *
+ * DESCRIPTION: Dumps tuning metadata to file system
+ *
+ * PARAMETERS :
+ *   @meta           : tuning metadata
+ *   @dumpFrameCount : current dump frame count
+ *   @enabled        : Enable flag
+ *   @type           : type string used in the dump file name
+ *   @frameNumber    : current frame number
+ *
+ *==========================================================================*/
+void QCamera3HardwareInterface::dumpMetadataToFile(tuning_params_t &meta,
+                                                   uint32_t &dumpFrameCount,
+                                                   bool enabled,
+                                                   const char *type,
+                                                   uint32_t frameNumber)
+{
+    uint32_t frm_num = 0;
+
+    //Some sanity checks
+    if (meta.tuning_sensor_data_size > TUNING_SENSOR_DATA_MAX) {
+        ALOGE("%s : Tuning sensor data size bigger than expected %d: %d",
+              __func__,
+              meta.tuning_sensor_data_size,
+              TUNING_SENSOR_DATA_MAX);
+        return;
+    }
+
+    if (meta.tuning_vfe_data_size > TUNING_VFE_DATA_MAX) {
+        ALOGE("%s : Tuning VFE data size bigger than expected %d: %d",
+              __func__,
+              meta.tuning_vfe_data_size,
+              TUNING_VFE_DATA_MAX);
+        return;
+    }
+
+    if (meta.tuning_cpp_data_size > TUNING_CPP_DATA_MAX) {
+        ALOGE("%s : Tuning CPP data size bigger than expected %d: %d",
+              __func__,
+              meta.tuning_cpp_data_size,
+              TUNING_CPP_DATA_MAX);
+        return;
+    }
+
+    if (meta.tuning_cac_data_size > TUNING_CAC_DATA_MAX) {
+        ALOGE("%s : Tuning CAC data size bigger than expected %d: %d",
+              __func__,
+              meta.tuning_cac_data_size,
+              TUNING_CAC_DATA_MAX);
+        return;
+    }
+    //
+
+    if(enabled){
+        char timeBuf[FILENAME_MAX];
+        char buf[FILENAME_MAX];
+        memset(buf, 0, sizeof(buf));
+        memset(timeBuf, 0, sizeof(timeBuf));
+        time_t current_time;
+        struct tm * timeinfo;
+        time (&current_time);
+        timeinfo = localtime (&current_time);
+        if (timeinfo != NULL) {
+            strftime (timeBuf, sizeof(timeBuf),
+                    QCAMERA_DUMP_FRM_LOCATION"%Y%m%d%H%M%S", timeinfo);
+        }
+        String8 filePath(timeBuf);
+        snprintf(buf,
+                sizeof(buf),
+                "%dm_%s_%d.bin",
+                dumpFrameCount,
+                type,
+                frameNumber);
+        filePath.append(buf);
+        int file_fd = open(filePath.string(), O_RDWR | O_CREAT, 0777);
+        if (file_fd >= 0) {
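+            // The dump mirrors the tuning blob layout: a six-word header
+            // (version and section sizes) followed by the sensor, VFE, CPP
+            // and CAC data sections.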
+            ssize_t written_len = 0;
+            meta.tuning_data_version = TUNING_DATA_VERSION;
+            void *data = (void *)((uint8_t *)&meta.tuning_data_version);
+            written_len += write(file_fd, data, sizeof(uint32_t));
+            data = (void *)((uint8_t *)&meta.tuning_sensor_data_size);
+            CDBG("tuning_sensor_data_size %d",(int)(*(int *)data));
+            written_len += write(file_fd, data, sizeof(uint32_t));
+            data = (void *)((uint8_t *)&meta.tuning_vfe_data_size);
+            CDBG("tuning_vfe_data_size %d",(int)(*(int *)data));
+            written_len += write(file_fd, data, sizeof(uint32_t));
+            data = (void *)((uint8_t *)&meta.tuning_cpp_data_size);
+            CDBG("tuning_cpp_data_size %d",(int)(*(int *)data));
+            written_len += write(file_fd, data, sizeof(uint32_t));
+            data = (void *)((uint8_t *)&meta.tuning_cac_data_size);
+            CDBG("tuning_cac_data_size %d",(int)(*(int *)data));
+            written_len += write(file_fd, data, sizeof(uint32_t));
+            meta.tuning_mod3_data_size = 0;
+            data = (void *)((uint8_t *)&meta.tuning_mod3_data_size);
+            CDBG("tuning_mod3_data_size %d",(int)(*(int *)data));
+            written_len += write(file_fd, data, sizeof(uint32_t));
+            size_t total_size = meta.tuning_sensor_data_size;
+            data = (void *)((uint8_t *)&meta.data);
+            written_len += write(file_fd, data, total_size);
+            total_size = meta.tuning_vfe_data_size;
+            data = (void *)((uint8_t *)&meta.data[TUNING_VFE_DATA_OFFSET]);
+            written_len += write(file_fd, data, total_size);
+            total_size = meta.tuning_cpp_data_size;
+            data = (void *)((uint8_t *)&meta.data[TUNING_CPP_DATA_OFFSET]);
+            written_len += write(file_fd, data, total_size);
+            total_size = meta.tuning_cac_data_size;
+            data = (void *)((uint8_t *)&meta.data[TUNING_CAC_DATA_OFFSET]);
+            written_len += write(file_fd, data, total_size);
+            close(file_fd);
+        }else {
+            ALOGE("%s: fail to open file for metadata dumping", __func__);
+        }
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : cleanAndSortStreamInfo
+ *
+ * DESCRIPTION: helper method to clean up invalid streams in stream_info,
+ *              and sort them such that the raw stream is at the end of the
+ *              list. This is a workaround for a camera daemon constraint.
+ *
+ * PARAMETERS : None
+ *
+ *==========================================================================*/
+void QCamera3HardwareInterface::cleanAndSortStreamInfo()
+{
+    List<stream_info_t *> newStreamInfo;
+
+    /*clean up invalid streams*/
+    for (List<stream_info_t*>::iterator it=mStreamInfo.begin();
+            it != mStreamInfo.end();) {
+        if(((*it)->status) == INVALID){
+            QCamera3Channel *channel = (QCamera3Channel*)(*it)->stream->priv;
+            delete channel;
+            free(*it);
+            it = mStreamInfo.erase(it);
+        } else {
+            it++;
+        }
+    }
+
+    // Move preview/video/callback/snapshot streams into newList
+    for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
+            it != mStreamInfo.end();) {
+        if ((*it)->stream->format != HAL_PIXEL_FORMAT_RAW_OPAQUE &&
+                (*it)->stream->format != HAL_PIXEL_FORMAT_RAW10 &&
+                (*it)->stream->format != HAL_PIXEL_FORMAT_RAW16) {
+            newStreamInfo.push_back(*it);
+            it = mStreamInfo.erase(it);
+        } else
+            it++;
+    }
+    // Move raw streams into newList
+    for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
+            it != mStreamInfo.end();) {
+        newStreamInfo.push_back(*it);
+        it = mStreamInfo.erase(it);
+    }
+
+    mStreamInfo = newStreamInfo;
+}
+
+/*===========================================================================
+ * FUNCTION   : extractJpegMetadata
+ *
+ * DESCRIPTION: helper method to extract Jpeg metadata from capture request.
+ *              JPEG metadata is cached in the HAL and returned as part of the
+ *              capture result when metadata is received from the camera daemon.
+ *
+ * PARAMETERS : @jpegMetadata: jpeg metadata to be extracted
+ *              @request:      capture request
+ *
+ *==========================================================================*/
+void QCamera3HardwareInterface::extractJpegMetadata(
+        CameraMetadata& jpegMetadata,
+        const camera3_capture_request_t *request)
+{
+    CameraMetadata frame_settings;
+    frame_settings = request->settings;
+
+    if (frame_settings.exists(ANDROID_JPEG_GPS_COORDINATES))
+        jpegMetadata.update(ANDROID_JPEG_GPS_COORDINATES,
+                frame_settings.find(ANDROID_JPEG_GPS_COORDINATES).data.d,
+                frame_settings.find(ANDROID_JPEG_GPS_COORDINATES).count);
+
+    if (frame_settings.exists(ANDROID_JPEG_GPS_PROCESSING_METHOD))
+        jpegMetadata.update(ANDROID_JPEG_GPS_PROCESSING_METHOD,
+                frame_settings.find(ANDROID_JPEG_GPS_PROCESSING_METHOD).data.u8,
+                frame_settings.find(ANDROID_JPEG_GPS_PROCESSING_METHOD).count);
+
+    if (frame_settings.exists(ANDROID_JPEG_GPS_TIMESTAMP))
+        jpegMetadata.update(ANDROID_JPEG_GPS_TIMESTAMP,
+                frame_settings.find(ANDROID_JPEG_GPS_TIMESTAMP).data.i64,
+                frame_settings.find(ANDROID_JPEG_GPS_TIMESTAMP).count);
+
+    if (frame_settings.exists(ANDROID_JPEG_ORIENTATION))
+        jpegMetadata.update(ANDROID_JPEG_ORIENTATION,
+                frame_settings.find(ANDROID_JPEG_ORIENTATION).data.i32,
+                frame_settings.find(ANDROID_JPEG_ORIENTATION).count);
+
+    if (frame_settings.exists(ANDROID_JPEG_QUALITY))
+        jpegMetadata.update(ANDROID_JPEG_QUALITY,
+                frame_settings.find(ANDROID_JPEG_QUALITY).data.u8,
+                frame_settings.find(ANDROID_JPEG_QUALITY).count);
+
+    if (frame_settings.exists(ANDROID_JPEG_THUMBNAIL_QUALITY))
+        jpegMetadata.update(ANDROID_JPEG_THUMBNAIL_QUALITY,
+                frame_settings.find(ANDROID_JPEG_THUMBNAIL_QUALITY).data.u8,
+                frame_settings.find(ANDROID_JPEG_THUMBNAIL_QUALITY).count);
+
+    if (frame_settings.exists(ANDROID_JPEG_THUMBNAIL_SIZE)) {
+        int32_t thumbnail_size[2];
+        thumbnail_size[0] = frame_settings.find(ANDROID_JPEG_THUMBNAIL_SIZE).data.i32[0];
+        thumbnail_size[1] = frame_settings.find(ANDROID_JPEG_THUMBNAIL_SIZE).data.i32[1];
+        if (frame_settings.exists(ANDROID_JPEG_ORIENTATION)) {
+            int32_t orientation =
+                  frame_settings.find(ANDROID_JPEG_ORIENTATION).data.i32[0];
+            if ((orientation == 90) || (orientation == 270)) {
+               //swap thumbnail dimensions for rotations 90 and 270 in jpeg metadata.
+               int32_t temp;
+               temp = thumbnail_size[0];
+               thumbnail_size[0] = thumbnail_size[1];
+               thumbnail_size[1] = temp;
+            }
+         }
+         jpegMetadata.update(ANDROID_JPEG_THUMBNAIL_SIZE,
+                thumbnail_size,
+                frame_settings.find(ANDROID_JPEG_THUMBNAIL_SIZE).count);
+    }
+
+}
+
+/*===========================================================================
+ * FUNCTION   : convertToRegions
+ *
+ * DESCRIPTION: helper method to convert from cam_rect_t into int32_t array
+ *
+ * PARAMETERS :
+ *   @rect   : cam_rect_t struct to convert
+ *   @region : int32_t destination array
+ *   @weight : if we are converting from cam_area_t, weight is valid
+ *             else weight = -1
+ *
+ *==========================================================================*/
+void QCamera3HardwareInterface::convertToRegions(cam_rect_t rect,
+        int32_t *region, int weight)
+{
+    region[0] = rect.left;
+    region[1] = rect.top;
+    region[2] = rect.left + rect.width;
+    region[3] = rect.top + rect.height;
+    if (weight > -1) {
+        region[4] = weight;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : convertFromRegions
+ *
+ * DESCRIPTION: helper method to convert a framework region array into cam_area_t
+ *
+ * PARAMETERS :
+ *   @roi      : cam_area_t destination struct
+ *   @settings : framework metadata containing the region entry
+ *   @tag      : metadata tag of the region entry to convert
+ *
+ *==========================================================================*/
+void QCamera3HardwareInterface::convertFromRegions(cam_area_t &roi,
+        const camera_metadata_t *settings, uint32_t tag)
+{
+    CameraMetadata frame_settings;
+    frame_settings = settings;
+    int32_t x_min = frame_settings.find(tag).data.i32[0];
+    int32_t y_min = frame_settings.find(tag).data.i32[1];
+    int32_t x_max = frame_settings.find(tag).data.i32[2];
+    int32_t y_max = frame_settings.find(tag).data.i32[3];
+    roi.weight = frame_settings.find(tag).data.i32[4];
+    roi.rect.left = x_min;
+    roi.rect.top = y_min;
+    roi.rect.width = x_max - x_min;
+    roi.rect.height = y_max - y_min;
+}
+
+/*===========================================================================
+ * FUNCTION   : resetIfNeededROI
+ *
+ * DESCRIPTION: helper method to reset the roi if it is greater than scaler
+ *              crop region
+ *
+ * PARAMETERS :
+ *   @roi       : cam_area_t struct to resize
+ *   @scalerCropRegion : cam_crop_region_t region to compare against
+ *
+ *
+ *==========================================================================*/
+bool QCamera3HardwareInterface::resetIfNeededROI(cam_area_t* roi,
+                                                 const cam_crop_region_t* scalerCropRegion)
+{
+    int32_t roi_x_max = roi->rect.width + roi->rect.left;
+    int32_t roi_y_max = roi->rect.height + roi->rect.top;
+    int32_t crop_x_max = scalerCropRegion->width + scalerCropRegion->left;
+    int32_t crop_y_max = scalerCropRegion->height + scalerCropRegion->top;
+
+    /* According to the spec, weight = 0 indicates that the ROI should be
+     * disabled. Without this check, the calculations below that validate
+     * whether the ROI is inside the scalar crop region would fail, the ROI
+     * would not be reset, and the algorithm would continue to use a stale
+     * ROI window.
+     */
+    if (roi->weight == 0) {
+        return true;
+    }
+
+    if ((roi_x_max < scalerCropRegion->left) ||
+        // right edge of roi window is left of scalar crop's left edge
+        (roi_y_max < scalerCropRegion->top)  ||
+        // bottom edge of roi window is above scalar crop's top edge
+        (roi->rect.left > crop_x_max) ||
+        // left edge of roi window is beyond(right) of scalar crop's right edge
+        (roi->rect.top > crop_y_max)){
+        // top edge of roi window is below scalar crop's bottom edge
+        return false;
+    }
+    if (roi->rect.left < scalerCropRegion->left) {
+        roi->rect.left = scalerCropRegion->left;
+    }
+    if (roi->rect.top < scalerCropRegion->top) {
+        roi->rect.top = scalerCropRegion->top;
+    }
+    if (roi_x_max > crop_x_max) {
+        roi_x_max = crop_x_max;
+    }
+    if (roi_y_max > crop_y_max) {
+        roi_y_max = crop_y_max;
+    }
+    roi->rect.width = roi_x_max - roi->rect.left;
+    roi->rect.height = roi_y_max - roi->rect.top;
+    return true;
+}
+
+/*===========================================================================
+ * FUNCTION   : convertLandmarks
+ *
+ * DESCRIPTION: helper method to extract the landmarks from face detection info
+ *
+ * PARAMETERS :
+ *   @face   : cam_face_detection_info_t struct to convert
+ *   @landmarks : int32_t destination array
+ *
+ *
+ *==========================================================================*/
+void QCamera3HardwareInterface::convertLandmarks(cam_face_detection_info_t face, int32_t *landmarks)
+{
+    landmarks[0] = (int32_t)face.left_eye_center.x;
+    landmarks[1] = (int32_t)face.left_eye_center.y;
+    landmarks[2] = (int32_t)face.right_eye_center.x;
+    landmarks[3] = (int32_t)face.right_eye_center.y;
+    landmarks[4] = (int32_t)face.mouth_center.x;
+    landmarks[5] = (int32_t)face.mouth_center.y;
+}
+
+#define DATA_PTR(MEM_OBJ,INDEX) MEM_OBJ->getPtr( INDEX )
+/*===========================================================================
+ * FUNCTION   : initCapabilities
+ *
+ * DESCRIPTION: initialize camera capabilities in static data struct
+ *
+ * PARAMETERS :
+ *   @cameraId  : camera Id
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3HardwareInterface::initCapabilities(uint32_t cameraId)
+{
+    int rc = 0;
+    mm_camera_vtbl_t *cameraHandle = NULL;
+    QCamera3HeapMemory *capabilityHeap = NULL;
+
+    rc = camera_open((uint8_t)cameraId, &cameraHandle);
+    if (rc || !cameraHandle) {
+        ALOGE("%s: camera_open failed. rc = %d, cameraHandle = %p", __func__, rc, cameraHandle);
+        goto open_failed;
+    }
+
+    capabilityHeap = new QCamera3HeapMemory(1);
+    if (capabilityHeap == NULL) {
+        ALOGE("%s: creation of capabilityHeap failed", __func__);
+        goto heap_creation_failed;
+    }
+    /* Allocate memory for capability buffer */
+    rc = capabilityHeap->allocate(sizeof(cam_capability_t));
+    if(rc != OK) {
+        ALOGE("%s: No memory for cappability", __func__);
+        goto allocate_failed;
+    }
+
+    /* Map memory for capability buffer */
+    memset(DATA_PTR(capabilityHeap,0), 0, sizeof(cam_capability_t));
+    rc = cameraHandle->ops->map_buf(cameraHandle->camera_handle,
+                                CAM_MAPPING_BUF_TYPE_CAPABILITY,
+                                capabilityHeap->getFd(0),
+                                sizeof(cam_capability_t));
+    if(rc < 0) {
+        ALOGE("%s: failed to map capability buffer", __func__);
+        goto map_failed;
+    }
+
+    /* Query Capability */
+    rc = cameraHandle->ops->query_capability(cameraHandle->camera_handle);
+    if(rc < 0) {
+        ALOGE("%s: failed to query capability",__func__);
+        goto query_failed;
+    }
+    gCamCapability[cameraId] = (cam_capability_t *)malloc(sizeof(cam_capability_t));
+    if (!gCamCapability[cameraId]) {
+        ALOGE("%s: out of memory", __func__);
+        goto query_failed;
+    }
+    memcpy(gCamCapability[cameraId], DATA_PTR(capabilityHeap,0),
+                                        sizeof(cam_capability_t));
+    rc = 0;
+
+query_failed:
+    cameraHandle->ops->unmap_buf(cameraHandle->camera_handle,
+                            CAM_MAPPING_BUF_TYPE_CAPABILITY);
+map_failed:
+    capabilityHeap->deallocate();
+allocate_failed:
+    delete capabilityHeap;
+heap_creation_failed:
+    cameraHandle->ops->close_camera(cameraHandle->camera_handle);
+    cameraHandle = NULL;
+open_failed:
+    return rc;
+}
+
+/*==========================================================================
+ * FUNCTION   : get3Aversion
+ *
+ * DESCRIPTION: get the Q3A S/W version
+ *
+ * PARAMETERS :
+ *  @sw_version: Reference of Q3A structure which will hold version info upon
+ *               return
+ *
+ * RETURN     : None
+ *
+ *==========================================================================*/
+void QCamera3HardwareInterface::get3AVersion(cam_q3a_version_t &sw_version)
+{
+    if(gCamCapability[mCameraId])
+        sw_version = gCamCapability[mCameraId]->q3a_version;
+    else
+        ALOGE("%s:Capability structure NULL!", __func__);
+}
+
+
+/*===========================================================================
+ * FUNCTION   : initParameters
+ *
+ * DESCRIPTION: initialize camera parameters
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3HardwareInterface::initParameters()
+{
+    int rc = 0;
+
+    //Allocate Set Param Buffer
+    mParamHeap = new QCamera3HeapMemory(1);
+    rc = mParamHeap->allocate(sizeof(metadata_buffer_t));
+    if(rc != OK) {
+        rc = NO_MEMORY;
+        ALOGE("Failed to allocate SETPARM Heap memory");
+        delete mParamHeap;
+        mParamHeap = NULL;
+        return rc;
+    }
+
+    //Map memory for parameters buffer
+    rc = mCameraHandle->ops->map_buf(mCameraHandle->camera_handle,
+            CAM_MAPPING_BUF_TYPE_PARM_BUF,
+            mParamHeap->getFd(0),
+            sizeof(metadata_buffer_t));
+    if(rc < 0) {
+        ALOGE("%s:failed to map SETPARM buffer",__func__);
+        rc = FAILED_TRANSACTION;
+        mParamHeap->deallocate();
+        delete mParamHeap;
+        mParamHeap = NULL;
+        return rc;
+    }
+
+    mParameters = (metadata_buffer_t *) DATA_PTR(mParamHeap,0);
+
+    mPrevParameters = (metadata_buffer_t *)malloc(sizeof(metadata_buffer_t));
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : deinitParameters
+ *
+ * DESCRIPTION: de-initialize camera parameters
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : NONE
+ *==========================================================================*/
+void QCamera3HardwareInterface::deinitParameters()
+{
+    mCameraHandle->ops->unmap_buf(mCameraHandle->camera_handle,
+            CAM_MAPPING_BUF_TYPE_PARM_BUF);
+
+    mParamHeap->deallocate();
+    delete mParamHeap;
+    mParamHeap = NULL;
+
+    mParameters = NULL;
+
+    free(mPrevParameters);
+    mPrevParameters = NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : calcMaxJpegSize
+ *
+ * DESCRIPTION: Calculates maximum jpeg size supported by the cameraId
+ *
+ * PARAMETERS :
+ *   @camera_id : camera Id
+ *
+ * RETURN     : max_jpeg_size
+ *==========================================================================*/
+size_t QCamera3HardwareInterface::calcMaxJpegSize(uint32_t camera_id)
+{
+    size_t max_jpeg_size = 0;
+    size_t temp_width, temp_height;
+    size_t count = MIN(gCamCapability[camera_id]->picture_sizes_tbl_cnt,
+            MAX_SIZES_CNT);
+    for (size_t i = 0; i < count; i++) {
+        temp_width = (size_t)gCamCapability[camera_id]->picture_sizes_tbl[i].width;
+        temp_height = (size_t)gCamCapability[camera_id]->picture_sizes_tbl[i].height;
+        if (temp_width * temp_height > max_jpeg_size ) {
+            max_jpeg_size = temp_width * temp_height;
+        }
+    }
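+    // Budget 1.5 bytes per pixel for the worst-case JPEG plus space for the
+    // trailing camera3_jpeg_blob_t header.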
+    max_jpeg_size = max_jpeg_size * 3/2 + sizeof(camera3_jpeg_blob_t);
+    return max_jpeg_size;
+}
+
+/*===========================================================================
+ * FUNCTION   : getMaxRawSize
+ *
+ * DESCRIPTION: Fetches maximum raw size supported by the cameraId
+ *
+ * PARAMETERS :
+ *   @camera_id : camera Id
+ *
+ * RETURN     : Largest supported Raw Dimension
+ *==========================================================================*/
+cam_dimension_t QCamera3HardwareInterface::getMaxRawSize(uint32_t camera_id)
+{
+    int max_width = 0;
+    cam_dimension_t maxRawSize;
+
+    memset(&maxRawSize, 0, sizeof(cam_dimension_t));
+    for (size_t i = 0; i < gCamCapability[camera_id]->supported_raw_dim_cnt; i++) {
+        if (max_width < gCamCapability[camera_id]->raw_dim[i].width) {
+            max_width = gCamCapability[camera_id]->raw_dim[i].width;
+            maxRawSize = gCamCapability[camera_id]->raw_dim[i];
+        }
+    }
+    return maxRawSize;
+}
+
+
+/*===========================================================================
+ * FUNCTION   : calcMaxJpegDim
+ *
+ * DESCRIPTION: Calculates maximum jpeg dimension supported by the cameraId
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : max_jpeg_dim
+ *==========================================================================*/
+cam_dimension_t QCamera3HardwareInterface::calcMaxJpegDim()
+{
+    cam_dimension_t max_jpeg_dim;
+    cam_dimension_t curr_jpeg_dim;
+    max_jpeg_dim.width = 0;
+    max_jpeg_dim.height = 0;
+    curr_jpeg_dim.width = 0;
+    curr_jpeg_dim.height = 0;
+    for (size_t i = 0; i < gCamCapability[mCameraId]->picture_sizes_tbl_cnt; i++) {
+        curr_jpeg_dim.width = gCamCapability[mCameraId]->picture_sizes_tbl[i].width;
+        curr_jpeg_dim.height = gCamCapability[mCameraId]->picture_sizes_tbl[i].height;
+        if (curr_jpeg_dim.width * curr_jpeg_dim.height >
+            max_jpeg_dim.width * max_jpeg_dim.height ) {
+            max_jpeg_dim.width = curr_jpeg_dim.width;
+            max_jpeg_dim.height = curr_jpeg_dim.height;
+        }
+    }
+    return max_jpeg_dim;
+}
+
+/*===========================================================================
+ * FUNCTION   : addStreamConfig
+ *
+ * DESCRIPTION: adds the stream configuration to the array
+ *
+ * PARAMETERS :
+ * @available_stream_configs : pointer to stream configuration array
+ * @scalar_format            : scalar format
+ * @dim                      : configuration dimension
+ * @config_type              : input or output configuration type
+ *
+ * RETURN     : NONE
+ *==========================================================================*/
+void QCamera3HardwareInterface::addStreamConfig(Vector<int32_t> &available_stream_configs,
+        int32_t scalar_format, const cam_dimension_t &dim, int32_t config_type)
+{
+    available_stream_configs.add(scalar_format);
+    available_stream_configs.add(dim.width);
+    available_stream_configs.add(dim.height);
+    available_stream_configs.add(config_type);
+}
+
+
+/*===========================================================================
+ * FUNCTION   : initStaticMetadata
+ *
+ * DESCRIPTION: initialize the static metadata
+ *
+ * PARAMETERS :
+ *   @cameraId  : camera Id
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3HardwareInterface::initStaticMetadata(uint32_t cameraId)
+{
+    int rc = 0;
+    CameraMetadata staticInfo;
+    size_t count = 0;
+    bool limitedDevice = false;
+    char prop[PROPERTY_VALUE_MAX];
+
+    /* If the sensor is a YUV sensor (no raw support) or if per-frame control
+     * is not guaranteed, it is advertised as a limited device */
+    limitedDevice = gCamCapability[cameraId]->no_per_frame_control_support ||
+            (CAM_SENSOR_YUV == gCamCapability[cameraId]->sensor_type.sens_type);
+
+    uint8_t supportedHwLvl = limitedDevice ?
+            ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_LIMITED :
+            ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL_FULL;
+
+    staticInfo.update(ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL,
+            &supportedHwLvl, 1);
+
+    bool facingBack = gCamCapability[cameraId]->position == CAM_POSITION_BACK;
+    /*HAL 3 only*/
+    staticInfo.update(ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE,
+                    &gCamCapability[cameraId]->min_focus_distance, 1);
+
+    staticInfo.update(ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE,
+                    &gCamCapability[cameraId]->hyper_focal_distance, 1);
+
+    /*should be using focal lengths but sensor doesn't provide that info now*/
+    staticInfo.update(ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
+                      &gCamCapability[cameraId]->focal_length,
+                      1);
+
+    staticInfo.update(ANDROID_LENS_INFO_AVAILABLE_APERTURES,
+                      gCamCapability[cameraId]->apertures,
+                      gCamCapability[cameraId]->apertures_count);
+
+    staticInfo.update(ANDROID_LENS_INFO_AVAILABLE_FILTER_DENSITIES,
+                gCamCapability[cameraId]->filter_densities,
+                gCamCapability[cameraId]->filter_densities_count);
+
+
+    staticInfo.update(ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
+                      (uint8_t *)gCamCapability[cameraId]->optical_stab_modes,
+                      gCamCapability[cameraId]->optical_stab_modes_count);
+
+    int32_t lens_shading_map_size[] = {gCamCapability[cameraId]->lens_shading_map_size.width,
+            gCamCapability[cameraId]->lens_shading_map_size.height};
+    staticInfo.update(ANDROID_LENS_INFO_SHADING_MAP_SIZE,
+                      lens_shading_map_size,
+                      sizeof(lens_shading_map_size)/sizeof(int32_t));
+
+    staticInfo.update(ANDROID_SENSOR_INFO_PHYSICAL_SIZE,
+            gCamCapability[cameraId]->sensor_physical_size, SENSOR_PHYSICAL_SIZE_CNT);
+
+    staticInfo.update(ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE,
+            gCamCapability[cameraId]->exposure_time_range, EXPOSURE_TIME_RANGE_CNT);
+
+    staticInfo.update(ANDROID_SENSOR_INFO_MAX_FRAME_DURATION,
+            &gCamCapability[cameraId]->max_frame_duration, 1);
+
+    camera_metadata_rational baseGainFactor = {
+            gCamCapability[cameraId]->base_gain_factor.numerator,
+            gCamCapability[cameraId]->base_gain_factor.denominator};
+    staticInfo.update(ANDROID_SENSOR_BASE_GAIN_FACTOR,
+                      &baseGainFactor, 1);
+
+    staticInfo.update(ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
+                     (uint8_t *)&gCamCapability[cameraId]->color_arrangement, 1);
+
+    int32_t pixel_array_size[] = {gCamCapability[cameraId]->pixel_array_size.width,
+            gCamCapability[cameraId]->pixel_array_size.height};
+    staticInfo.update(ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
+                      pixel_array_size, sizeof(pixel_array_size)/sizeof(pixel_array_size[0]));
+
+    int32_t active_array_size[] = {gCamCapability[cameraId]->active_array_size.left,
+                                                gCamCapability[cameraId]->active_array_size.top,
+                                                gCamCapability[cameraId]->active_array_size.width,
+                                                gCamCapability[cameraId]->active_array_size.height};
+    staticInfo.update(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
+                      active_array_size, sizeof(active_array_size)/sizeof(active_array_size[0]));
+
+    staticInfo.update(ANDROID_SENSOR_INFO_WHITE_LEVEL,
+            &gCamCapability[cameraId]->white_level, 1);
+
+    staticInfo.update(ANDROID_SENSOR_BLACK_LEVEL_PATTERN,
+            gCamCapability[cameraId]->black_level_pattern, BLACK_LEVEL_PATTERN_CNT);
+
+    bool hasBlackRegions = false;
+    if (gCamCapability[cameraId]->optical_black_region_count != 0 &&
+            gCamCapability[cameraId]->optical_black_region_count <= MAX_OPTICAL_BLACK_REGIONS) {
+        int32_t opticalBlackRegions[MAX_OPTICAL_BLACK_REGIONS * 4];
+        for (size_t i = 0; i < gCamCapability[cameraId]->optical_black_region_count * 4; i++) {
+            opticalBlackRegions[i] = gCamCapability[cameraId]->optical_black_regions[i];
+        }
+        staticInfo.update(ANDROID_SENSOR_OPTICAL_BLACK_REGIONS,
+                opticalBlackRegions, gCamCapability[cameraId]->optical_black_region_count * 4);
+        hasBlackRegions = true;
+    }
+
+    staticInfo.update(ANDROID_FLASH_INFO_CHARGE_DURATION,
+                      &gCamCapability[cameraId]->flash_charge_duration, 1);
+
+    staticInfo.update(ANDROID_TONEMAP_MAX_CURVE_POINTS,
+                      &gCamCapability[cameraId]->max_tone_map_curve_points, 1);
+
+    uint8_t timestampSource = ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE_REALTIME;
+    staticInfo.update(ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE,
+            &timestampSource, 1);
+
+    staticInfo.update(ANDROID_STATISTICS_INFO_HISTOGRAM_BUCKET_COUNT,
+                      &gCamCapability[cameraId]->histogram_size, 1);
+
+    staticInfo.update(ANDROID_STATISTICS_INFO_MAX_HISTOGRAM_COUNT,
+            &gCamCapability[cameraId]->max_histogram_count, 1);
+
+    int32_t sharpness_map_size[] = {gCamCapability[cameraId]->sharpness_map_size.width,
+            gCamCapability[cameraId]->sharpness_map_size.height};
+
+    staticInfo.update(ANDROID_STATISTICS_INFO_SHARPNESS_MAP_SIZE,
+            sharpness_map_size, sizeof(sharpness_map_size)/sizeof(int32_t));
+
+    staticInfo.update(ANDROID_STATISTICS_INFO_MAX_SHARPNESS_MAP_VALUE,
+            &gCamCapability[cameraId]->max_sharpness_map_value, 1);
+
+    int32_t scalar_formats[] = {
+            ANDROID_SCALER_AVAILABLE_FORMATS_RAW_OPAQUE,
+            ANDROID_SCALER_AVAILABLE_FORMATS_RAW16,
+            ANDROID_SCALER_AVAILABLE_FORMATS_YCbCr_420_888,
+            ANDROID_SCALER_AVAILABLE_FORMATS_BLOB,
+            HAL_PIXEL_FORMAT_RAW10,
+            HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED};
+    size_t scalar_formats_count = sizeof(scalar_formats) / sizeof(int32_t);
+    staticInfo.update(ANDROID_SCALER_AVAILABLE_FORMATS,
+                      scalar_formats,
+                      scalar_formats_count);
+
+    int32_t available_processed_sizes[MAX_SIZES_CNT * 2];
+    count = MIN(gCamCapability[cameraId]->picture_sizes_tbl_cnt, MAX_SIZES_CNT);
+    makeTable(gCamCapability[cameraId]->picture_sizes_tbl,
+            count, MAX_SIZES_CNT, available_processed_sizes);
+    staticInfo.update(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES,
+            available_processed_sizes, count * 2);
+
+    int32_t available_raw_sizes[MAX_SIZES_CNT * 2];
+    count = MIN(gCamCapability[cameraId]->supported_raw_dim_cnt, MAX_SIZES_CNT);
+    makeTable(gCamCapability[cameraId]->raw_dim,
+            count, MAX_SIZES_CNT, available_raw_sizes);
+    staticInfo.update(ANDROID_SCALER_AVAILABLE_RAW_SIZES,
+            available_raw_sizes, count * 2);
+
+    int32_t available_fps_ranges[MAX_SIZES_CNT * 2];
+    count = MIN(gCamCapability[cameraId]->fps_ranges_tbl_cnt, MAX_SIZES_CNT);
+    makeFPSTable(gCamCapability[cameraId]->fps_ranges_tbl,
+            count, MAX_SIZES_CNT, available_fps_ranges);
+    staticInfo.update(ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
+            available_fps_ranges, count * 2);
+
+    camera_metadata_rational exposureCompensationStep = {
+            gCamCapability[cameraId]->exp_compensation_step.numerator,
+            gCamCapability[cameraId]->exp_compensation_step.denominator};
+    staticInfo.update(ANDROID_CONTROL_AE_COMPENSATION_STEP,
+                      &exposureCompensationStep, 1);
+
+    Vector<uint8_t> availableVstabModes;
+    availableVstabModes.add(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF);
+    char eis_prop[PROPERTY_VALUE_MAX];
+    memset(eis_prop, 0, sizeof(eis_prop));
+    property_get("persist.camera.eis.enable", eis_prop, "0");
+    uint8_t eis_prop_set = (uint8_t)atoi(eis_prop);
+    if (facingBack && eis_prop_set) {
+        availableVstabModes.add(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_ON);
+    }
+    staticInfo.update(ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
+                      availableVstabModes.array(), availableVstabModes.size());
+
+    /*HAL 1 and HAL 3 common*/
+    float maxZoom = 4;
+    staticInfo.update(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
+            &maxZoom, 1);
+
+    uint8_t croppingType = ANDROID_SCALER_CROPPING_TYPE_FREEFORM;
+    staticInfo.update(ANDROID_SCALER_CROPPING_TYPE, &croppingType, 1);
+
+    int32_t max3aRegions[3] = {/*AE*/1,/*AWB*/ 0,/*AF*/ 1};
+    if (gCamCapability[cameraId]->supported_focus_modes_cnt == 1)
+        max3aRegions[2] = 0; /* AF not supported */
+    staticInfo.update(ANDROID_CONTROL_MAX_REGIONS,
+            max3aRegions, 3);
+
+    /* 0: OFF, 1: OFF+SIMPLE, 2: OFF+FULL, 3: OFF+SIMPLE+FULL */
+    memset(prop, 0, sizeof(prop));
+    property_get("persist.camera.facedetect", prop, "1");
+    uint8_t supportedFaceDetectMode = (uint8_t)atoi(prop);
+    CDBG("%s: Support face detection mode: %d",
+            __func__, supportedFaceDetectMode);
+
+    int32_t maxFaces = gCamCapability[cameraId]->max_num_roi;
+    Vector<uint8_t> availableFaceDetectModes;
+    availableFaceDetectModes.add(ANDROID_STATISTICS_FACE_DETECT_MODE_OFF);
+    if (supportedFaceDetectMode == 1) {
+        availableFaceDetectModes.add(ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE);
+    } else if (supportedFaceDetectMode == 2) {
+        availableFaceDetectModes.add(ANDROID_STATISTICS_FACE_DETECT_MODE_FULL);
+    } else if (supportedFaceDetectMode == 3) {
+        availableFaceDetectModes.add(ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE);
+        availableFaceDetectModes.add(ANDROID_STATISTICS_FACE_DETECT_MODE_FULL);
+    } else {
+        maxFaces = 0;
+    }
+    staticInfo.update(ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
+            availableFaceDetectModes.array(),
+            availableFaceDetectModes.size());
+    staticInfo.update(ANDROID_STATISTICS_INFO_MAX_FACE_COUNT,
+            (int32_t *)&maxFaces, 1);
+
+    int32_t exposureCompensationRange[] = {gCamCapability[cameraId]->exposure_compensation_min,
+                                           gCamCapability[cameraId]->exposure_compensation_max};
+    staticInfo.update(ANDROID_CONTROL_AE_COMPENSATION_RANGE,
+            exposureCompensationRange,
+            sizeof(exposureCompensationRange)/sizeof(int32_t));
+
+    uint8_t lensFacing = (facingBack) ?
+            ANDROID_LENS_FACING_BACK : ANDROID_LENS_FACING_FRONT;
+    staticInfo.update(ANDROID_LENS_FACING, &lensFacing, 1);
+
+    staticInfo.update(ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
+                      available_thumbnail_sizes,
+                      sizeof(available_thumbnail_sizes)/sizeof(int32_t));
+
+    /* All supported JPEG sizes are combined into this tag. */
+    int32_t available_jpeg_sizes[MAX_SIZES_CNT * 2];
+    count = MIN(gCamCapability[cameraId]->picture_sizes_tbl_cnt, MAX_SIZES_CNT);
+    size_t jpeg_sizes_cnt = filterJpegSizes(available_jpeg_sizes, available_processed_sizes,
+            count * 2, MAX_SIZES_CNT * 2, gCamCapability[cameraId]->active_array_size,
+            gCamCapability[cameraId]->max_downscale_factor);
+    /*android.scaler.availableStreamConfigurations*/
+    size_t max_stream_configs_size = count * scalar_formats_count * 4;
+    Vector<int32_t> available_stream_configs;
+    cam_dimension_t active_array_dim;
+    active_array_dim.width = gCamCapability[cameraId]->active_array_size.width;
+    active_array_dim.height = gCamCapability[cameraId]->active_array_size.height;
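+    /* Each availableStreamConfigurations entry is a four-int32 tuple:
+     * (format, width, height, input/output direction). */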
+    /* Add input/output stream configurations for each scalar format. */
+    for (size_t j = 0; j < scalar_formats_count; j++) {
+        switch (scalar_formats[j]) {
+        case ANDROID_SCALER_AVAILABLE_FORMATS_RAW16:
+        case ANDROID_SCALER_AVAILABLE_FORMATS_RAW_OPAQUE:
+        case HAL_PIXEL_FORMAT_RAW10:
+            for (size_t i = 0; i < gCamCapability[cameraId]->supported_raw_dim_cnt; i++) {
+                addStreamConfig(available_stream_configs, scalar_formats[j],
+                        gCamCapability[cameraId]->raw_dim[i],
+                        ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT);
+            }
+            break;
+        case HAL_PIXEL_FORMAT_BLOB:
+            cam_dimension_t jpeg_size;
+            for (size_t i = 0; i < jpeg_sizes_cnt/2; i++) {
+                jpeg_size.width  = available_jpeg_sizes[i*2];
+                jpeg_size.height = available_jpeg_sizes[i*2+1];
+                addStreamConfig(available_stream_configs, scalar_formats[j],
+                        jpeg_size,
+                        ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT);
+            }
+            break;
+        case HAL_PIXEL_FORMAT_YCbCr_420_888:
+        case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
+        default:
+            cam_dimension_t largest_picture_size;
+            memset(&largest_picture_size, 0, sizeof(cam_dimension_t));
+            for (size_t i = 0; i < gCamCapability[cameraId]->picture_sizes_tbl_cnt; i++) {
+                addStreamConfig(available_stream_configs, scalar_formats[j],
+                        gCamCapability[cameraId]->picture_sizes_tbl[i],
+                        ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT);
+                /* Keep track of the largest picture size */
+                if (gCamCapability[cameraId]->picture_sizes_tbl[i].width
+                        >= largest_picture_size.width &&
+                        gCamCapability[cameraId]->picture_sizes_tbl[i].height
+                        >= largest_picture_size.height)
+                    largest_picture_size = gCamCapability[cameraId]->picture_sizes_tbl[i];
+            }
+            /* For the two formats below we also support input streams for reprocessing; advertise those. */
+            if (scalar_formats[j] == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED ||
+                    scalar_formats[j] == HAL_PIXEL_FORMAT_YCbCr_420_888) {
+                 addStreamConfig(available_stream_configs, scalar_formats[j],
+                         largest_picture_size,
+                         ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_INPUT);
+            }
+            break;
+        }
+    }
+
+    staticInfo.update(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
+                      available_stream_configs.array(), available_stream_configs.size());
+    static const uint8_t hotpixelMode = ANDROID_HOT_PIXEL_MODE_FAST;
+    staticInfo.update(ANDROID_HOT_PIXEL_MODE, &hotpixelMode, 1);
+
+    static const uint8_t hotPixelMapMode = ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF;
+    staticInfo.update(ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE, &hotPixelMapMode, 1);
+
+    /* android.scaler.availableMinFrameDurations */
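+    /* Each entry is a (format, width, height, min frame duration in ns) tuple. */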
+    int64_t available_min_durations[max_stream_configs_size];
+    size_t idx = 0;
+    for (size_t j = 0; j < scalar_formats_count; j++) {
+        switch (scalar_formats[j]) {
+        case ANDROID_SCALER_AVAILABLE_FORMATS_RAW16:
+        case ANDROID_SCALER_AVAILABLE_FORMATS_RAW_OPAQUE:
+        case HAL_PIXEL_FORMAT_RAW10:
+            for (size_t i = 0; i < gCamCapability[cameraId]->supported_raw_dim_cnt; i++) {
+                available_min_durations[idx] = scalar_formats[j];
+                available_min_durations[idx+1] =
+                    gCamCapability[cameraId]->raw_dim[i].width;
+                available_min_durations[idx+2] =
+                    gCamCapability[cameraId]->raw_dim[i].height;
+                available_min_durations[idx+3] =
+                    gCamCapability[cameraId]->raw_min_duration[i];
+                idx+=4;
+            }
+            break;
+        default:
+            for (size_t i = 0; i < gCamCapability[cameraId]->picture_sizes_tbl_cnt; i++) {
+                available_min_durations[idx] = scalar_formats[j];
+                available_min_durations[idx+1] =
+                    gCamCapability[cameraId]->picture_sizes_tbl[i].width;
+                available_min_durations[idx+2] =
+                    gCamCapability[cameraId]->picture_sizes_tbl[i].height;
+                available_min_durations[idx+3] =
+                    gCamCapability[cameraId]->picture_min_duration[i];
+                idx+=4;
+            }
+            break;
+        }
+    }
+    staticInfo.update(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
+                      &available_min_durations[0], idx);
+
+    Vector<int32_t> available_hfr_configs;
+    for (size_t i = 0; i < gCamCapability[cameraId]->hfr_tbl_cnt; i++) {
+        int32_t fps = 0;
+        switch (gCamCapability[cameraId]->hfr_tbl[i].mode) {
+        case CAM_HFR_MODE_60FPS:
+            fps = 60;
+            break;
+        case CAM_HFR_MODE_90FPS:
+            fps = 90;
+            break;
+        case CAM_HFR_MODE_120FPS:
+            fps = 120;
+            break;
+        case CAM_HFR_MODE_150FPS:
+            fps = 150;
+            break;
+        case CAM_HFR_MODE_180FPS:
+            fps = 180;
+            break;
+        case CAM_HFR_MODE_210FPS:
+            fps = 210;
+            break;
+        case CAM_HFR_MODE_240FPS:
+            fps = 240;
+            break;
+        case CAM_HFR_MODE_480FPS:
+            fps = 480;
+            break;
+        case CAM_HFR_MODE_OFF:
+        case CAM_HFR_MODE_MAX:
+        default:
+            break;
+        }
+
+        /* Advertise only MIN_FPS_FOR_BATCH_MODE or above as HIGH_SPEED_CONFIGS */
+        if (fps >= MIN_FPS_FOR_BATCH_MODE) {
+            /* For each HFR frame rate, need to advertise one variable fps range
+             * and one fixed fps range. Eg: for 120 FPS, advertise [30, 120] and
+             * [120, 120]. While camcorder preview alone is running [30, 120] is
+             * set by the app. When video recording is started, [120, 120] is
+             * set. This way sensor configuration does not change when recording
+             * is started */
+
+            /* (width, height, fps_min, fps_max, batch_size_max) */
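+            /* batch_size_max = fps / PREVIEW_FPS_FOR_HFR; with the 30 fps
+             * preview rate from the example above, 120 fps gives a batch of 4. */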
+            available_hfr_configs.add(
+                    gCamCapability[cameraId]->hfr_tbl[i].dim.width);
+            available_hfr_configs.add(
+                    gCamCapability[cameraId]->hfr_tbl[i].dim.height);
+            available_hfr_configs.add(PREVIEW_FPS_FOR_HFR);
+            available_hfr_configs.add(fps);
+            available_hfr_configs.add(fps / PREVIEW_FPS_FOR_HFR);
+
+            /* (width, height, fps_min, fps_max, batch_size_max) */
+            available_hfr_configs.add(
+                    gCamCapability[cameraId]->hfr_tbl[i].dim.width);
+            available_hfr_configs.add(
+                    gCamCapability[cameraId]->hfr_tbl[i].dim.height);
+            available_hfr_configs.add(fps);
+            available_hfr_configs.add(fps);
+            available_hfr_configs.add(fps / PREVIEW_FPS_FOR_HFR);
+       }
+    }
+    //Advertise HFR capability only if the property is set
+    memset(prop, 0, sizeof(prop));
+    property_get("persist.camera.hal3hfr.enable", prop, "1");
+    uint8_t hfrEnable = (uint8_t)atoi(prop);
+
+    if(hfrEnable && available_hfr_configs.array()) {
+        staticInfo.update(
+                ANDROID_CONTROL_AVAILABLE_HIGH_SPEED_VIDEO_CONFIGURATIONS,
+                available_hfr_configs.array(), available_hfr_configs.size());
+    }
+
+    int32_t max_jpeg_size = (int32_t)calcMaxJpegSize(cameraId);
+    staticInfo.update(ANDROID_JPEG_MAX_SIZE,
+                      &max_jpeg_size, 1);
+
+    uint8_t avail_effects[CAM_EFFECT_MODE_MAX];
+    size_t size = 0;
+    count = CAM_EFFECT_MODE_MAX;
+    count = MIN(gCamCapability[cameraId]->supported_effects_cnt, count);
+    for (size_t i = 0; i < count; i++) {
+        int val = lookupFwkName(EFFECT_MODES_MAP, METADATA_MAP_SIZE(EFFECT_MODES_MAP),
+                gCamCapability[cameraId]->supported_effects[i]);
+        if (NAME_NOT_FOUND != val) {
+            avail_effects[size] = (uint8_t)val;
+            size++;
+        }
+    }
+    staticInfo.update(ANDROID_CONTROL_AVAILABLE_EFFECTS,
+                      avail_effects,
+                      size);
+
+    uint8_t avail_scene_modes[CAM_SCENE_MODE_MAX];
+    uint8_t supported_indexes[CAM_SCENE_MODE_MAX];
+    size_t supported_scene_modes_cnt = 0;
+    count = CAM_SCENE_MODE_MAX;
+    count = MIN(gCamCapability[cameraId]->supported_scene_modes_cnt, count);
+    for (size_t i = 0; i < count; i++) {
+        if (gCamCapability[cameraId]->supported_scene_modes[i] !=
+                CAM_SCENE_MODE_OFF) {
+            int val = lookupFwkName(SCENE_MODES_MAP,
+                    METADATA_MAP_SIZE(SCENE_MODES_MAP),
+                    gCamCapability[cameraId]->supported_scene_modes[i]);
+            if (NAME_NOT_FOUND != val) {
+                avail_scene_modes[supported_scene_modes_cnt] = (uint8_t)val;
+                supported_indexes[supported_scene_modes_cnt] = (uint8_t)i;
+                supported_scene_modes_cnt++;
+            }
+        }
+    }
+    staticInfo.update(ANDROID_CONTROL_AVAILABLE_SCENE_MODES,
+                      avail_scene_modes,
+                      supported_scene_modes_cnt);
+
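+    /* Each scene mode override entry is an (AE, AWB, AF) mode triplet. */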
+    uint8_t scene_mode_overrides[CAM_SCENE_MODE_MAX  * 3];
+    makeOverridesList(gCamCapability[cameraId]->scene_mode_overrides,
+                      supported_scene_modes_cnt,
+                      CAM_SCENE_MODE_MAX,
+                      scene_mode_overrides,
+                      supported_indexes,
+                      cameraId);
+
+    if (supported_scene_modes_cnt == 0) {
+        supported_scene_modes_cnt = 1;
+        avail_scene_modes[0] = ANDROID_CONTROL_SCENE_MODE_DISABLED;
+    }
+
+    staticInfo.update(ANDROID_CONTROL_SCENE_MODE_OVERRIDES,
+            scene_mode_overrides, supported_scene_modes_cnt * 3);
+
+    uint8_t available_control_modes[] = {ANDROID_CONTROL_MODE_OFF,
+                                         ANDROID_CONTROL_MODE_AUTO,
+                                         ANDROID_CONTROL_MODE_USE_SCENE_MODE};
+    staticInfo.update(ANDROID_CONTROL_AVAILABLE_MODES,
+            available_control_modes,
+            3);
+
+    uint8_t avail_antibanding_modes[CAM_ANTIBANDING_MODE_MAX];
+    size = 0;
+    count = CAM_ANTIBANDING_MODE_MAX;
+    count = MIN(gCamCapability[cameraId]->supported_antibandings_cnt, count);
+    for (size_t i = 0; i < count; i++) {
+        int val = lookupFwkName(ANTIBANDING_MODES_MAP, METADATA_MAP_SIZE(ANTIBANDING_MODES_MAP),
+                gCamCapability[cameraId]->supported_antibandings[i]);
+        if (NAME_NOT_FOUND != val) {
+            avail_antibanding_modes[size] = (uint8_t)val;
+            size++;
+        }
+
+    }
+    staticInfo.update(ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
+                      avail_antibanding_modes,
+                      size);
+
+    uint8_t avail_abberation_modes[CAM_COLOR_CORRECTION_ABERRATION_MAX];
+    size = 0;
+    count = CAM_COLOR_CORRECTION_ABERRATION_MAX;
+    count = MIN(gCamCapability[cameraId]->aberration_modes_count, count);
+    if (0 == count) {
+        avail_abberation_modes[0] =
+                ANDROID_COLOR_CORRECTION_ABERRATION_MODE_OFF;
+        size++;
+    } else {
+        for (size_t i = 0; i < count; i++) {
+            int val = lookupFwkName(COLOR_ABERRATION_MAP, METADATA_MAP_SIZE(COLOR_ABERRATION_MAP),
+                    gCamCapability[cameraId]->aberration_modes[i]);
+            if (NAME_NOT_FOUND != val) {
+                avail_abberation_modes[size] = (uint8_t)val;
+                size++;
+            } else {
+                ALOGE("%s: Invalid CAC mode %d", __func__,
+                        gCamCapability[cameraId]->aberration_modes[i]);
+                break;
+            }
+        }
+
+    }
+    staticInfo.update(ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES,
+            avail_abberation_modes,
+            size);
+
+    uint8_t avail_af_modes[CAM_FOCUS_MODE_MAX];
+    size = 0;
+    count = CAM_FOCUS_MODE_MAX;
+    count = MIN(gCamCapability[cameraId]->supported_focus_modes_cnt, count);
+    for (size_t i = 0; i < count; i++) {
+        int val = lookupFwkName(FOCUS_MODES_MAP, METADATA_MAP_SIZE(FOCUS_MODES_MAP),
+                gCamCapability[cameraId]->supported_focus_modes[i]);
+        if (NAME_NOT_FOUND != val) {
+            avail_af_modes[size] = (uint8_t)val;
+            size++;
+        }
+    }
+    staticInfo.update(ANDROID_CONTROL_AF_AVAILABLE_MODES,
+                      avail_af_modes,
+                      size);
+
+    uint8_t avail_awb_modes[CAM_WB_MODE_MAX];
+    size = 0;
+    count = CAM_WB_MODE_MAX;
+    count = MIN(gCamCapability[cameraId]->supported_white_balances_cnt, count);
+    for (size_t i = 0; i < count; i++) {
+        int val = lookupFwkName(WHITE_BALANCE_MODES_MAP,
+                METADATA_MAP_SIZE(WHITE_BALANCE_MODES_MAP),
+                gCamCapability[cameraId]->supported_white_balances[i]);
+        if (NAME_NOT_FOUND != val) {
+            avail_awb_modes[size] = (uint8_t)val;
+            size++;
+        }
+    }
+    staticInfo.update(ANDROID_CONTROL_AWB_AVAILABLE_MODES,
+                      avail_awb_modes,
+                      size);
+
+    uint8_t available_flash_levels[CAM_FLASH_FIRING_LEVEL_MAX];
+    count = CAM_FLASH_FIRING_LEVEL_MAX;
+    count = MIN(gCamCapability[cameraId]->supported_flash_firing_level_cnt,
+            count);
+    for (size_t i = 0; i < count; i++) {
+        available_flash_levels[i] =
+                gCamCapability[cameraId]->supported_firing_levels[i];
+    }
+    staticInfo.update(ANDROID_FLASH_FIRING_POWER,
+            available_flash_levels, count);
+
+    uint8_t flashAvailable;
+    if (gCamCapability[cameraId]->flash_available)
+        flashAvailable = ANDROID_FLASH_INFO_AVAILABLE_TRUE;
+    else
+        flashAvailable = ANDROID_FLASH_INFO_AVAILABLE_FALSE;
+    staticInfo.update(ANDROID_FLASH_INFO_AVAILABLE,
+            &flashAvailable, 1);
+
+    Vector<uint8_t> avail_ae_modes;
+    count = CAM_AE_MODE_MAX;
+    count = MIN(gCamCapability[cameraId]->supported_ae_modes_cnt, count);
+    for (size_t i = 0; i < count; i++) {
+        avail_ae_modes.add(gCamCapability[cameraId]->supported_ae_modes[i]);
+    }
+    if (flashAvailable) {
+        avail_ae_modes.add(ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH);
+        avail_ae_modes.add(ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH);
+    }
+    staticInfo.update(ANDROID_CONTROL_AE_AVAILABLE_MODES,
+                      avail_ae_modes.array(),
+                      avail_ae_modes.size());
+
+    int32_t sensitivity_range[2];
+    sensitivity_range[0] = gCamCapability[cameraId]->sensitivity_range.min_sensitivity;
+    sensitivity_range[1] = gCamCapability[cameraId]->sensitivity_range.max_sensitivity;
+    staticInfo.update(ANDROID_SENSOR_INFO_SENSITIVITY_RANGE,
+                      sensitivity_range,
+                      sizeof(sensitivity_range) / sizeof(int32_t));
+
+    staticInfo.update(ANDROID_SENSOR_MAX_ANALOG_SENSITIVITY,
+                      &gCamCapability[cameraId]->max_analog_sensitivity,
+                      1);
+
+    int32_t sensor_orientation = (int32_t)gCamCapability[cameraId]->sensor_mount_angle;
+    staticInfo.update(ANDROID_SENSOR_ORIENTATION,
+                      &sensor_orientation,
+                      1);
+
+    int32_t max_output_streams[] = {
+            MAX_STALLING_STREAMS,
+            MAX_PROCESSED_STREAMS,
+            MAX_RAW_STREAMS};
+    staticInfo.update(ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS,
+            max_output_streams,
+            sizeof(max_output_streams)/sizeof(max_output_streams[0]));
+
+    uint8_t avail_leds = 0;
+    staticInfo.update(ANDROID_LED_AVAILABLE_LEDS,
+                      &avail_leds, 0);
+
+    uint8_t focus_dist_calibrated;
+    int val = lookupFwkName(FOCUS_CALIBRATION_MAP, METADATA_MAP_SIZE(FOCUS_CALIBRATION_MAP),
+            gCamCapability[cameraId]->focus_dist_calibrated);
+    if (NAME_NOT_FOUND != val) {
+        focus_dist_calibrated = (uint8_t)val;
+        staticInfo.update(ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION,
+                     &focus_dist_calibrated, 1);
+    }
+
+    int32_t avail_testpattern_modes[MAX_TEST_PATTERN_CNT];
+    size = 0;
+    count = MIN(gCamCapability[cameraId]->supported_test_pattern_modes_cnt,
+            MAX_TEST_PATTERN_CNT);
+    for (size_t i = 0; i < count; i++) {
+        int testpatternMode = lookupFwkName(TEST_PATTERN_MAP, METADATA_MAP_SIZE(TEST_PATTERN_MAP),
+                gCamCapability[cameraId]->supported_test_pattern_modes[i]);
+        if (NAME_NOT_FOUND != testpatternMode) {
+            avail_testpattern_modes[size] = testpatternMode;
+            size++;
+        }
+    }
+    staticInfo.update(ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES,
+                      avail_testpattern_modes,
+                      size);
+
+    uint8_t max_pipeline_depth = (uint8_t)(MAX_INFLIGHT_REQUESTS + EMPTY_PIPELINE_DELAY + FRAME_SKIP_DELAY);
+    staticInfo.update(ANDROID_REQUEST_PIPELINE_MAX_DEPTH,
+                      &max_pipeline_depth,
+                      1);
+
+    int32_t partial_result_count = PARTIAL_RESULT_COUNT;
+    staticInfo.update(ANDROID_REQUEST_PARTIAL_RESULT_COUNT,
+                      &partial_result_count,
+                       1);
+
+    int32_t max_stall_duration = MAX_REPROCESS_STALL;
+    staticInfo.update(ANDROID_REPROCESS_MAX_CAPTURE_STALL, &max_stall_duration, 1);
+
+    Vector<uint8_t> available_capabilities;
+    available_capabilities.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE);
+    available_capabilities.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_SENSOR);
+    available_capabilities.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MANUAL_POST_PROCESSING);
+    available_capabilities.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_READ_SENSOR_SETTINGS);
+    available_capabilities.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BURST_CAPTURE);
+    available_capabilities.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_PRIVATE_REPROCESSING);
+    available_capabilities.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_YUV_REPROCESSING);
+    if (hfrEnable && available_hfr_configs.array()) {
+        available_capabilities.add(
+                ANDROID_REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO);
+    }
+
+    if (CAM_SENSOR_YUV != gCamCapability[cameraId]->sensor_type.sens_type) {
+        available_capabilities.add(ANDROID_REQUEST_AVAILABLE_CAPABILITIES_RAW);
+    }
+    staticInfo.update(ANDROID_REQUEST_AVAILABLE_CAPABILITIES,
+            available_capabilities.array(),
+            available_capabilities.size());
+
+    // aeLockAvailable is set to true if the capabilities include MANUAL_SENSOR
+    // and/or BURST_CAPTURE.
+    uint8_t aeLockAvailable = (gCamCapability[cameraId]->sensor_type.sens_type == CAM_SENSOR_RAW) ?
+            ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE : ANDROID_CONTROL_AE_LOCK_AVAILABLE_FALSE;
+
+    staticInfo.update(ANDROID_CONTROL_AE_LOCK_AVAILABLE,
+            &aeLockAvailable, 1);
+
+    // awbLockAvailable is set to true if the capabilities include
+    // MANUAL_POST_PROCESSING and/or BURST_CAPTURE.
+    uint8_t awbLockAvailable = (gCamCapability[cameraId]->sensor_type.sens_type == CAM_SENSOR_RAW) ?
+            ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE : ANDROID_CONTROL_AWB_LOCK_AVAILABLE_FALSE;
+
+    staticInfo.update(ANDROID_CONTROL_AWB_LOCK_AVAILABLE,
+            &awbLockAvailable, 1);
+
+    int32_t max_input_streams = 1;
+    staticInfo.update(ANDROID_REQUEST_MAX_NUM_INPUT_STREAMS,
+                      &max_input_streams,
+                      1);
+
+    /* format of the map is : input format, num_output_formats, outputFormat1,..,outputFormatN */
+    int32_t io_format_map[] = {HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, 2,
+            HAL_PIXEL_FORMAT_BLOB, HAL_PIXEL_FORMAT_YCbCr_420_888,
+            HAL_PIXEL_FORMAT_YCbCr_420_888, 2, HAL_PIXEL_FORMAT_BLOB,
+            HAL_PIXEL_FORMAT_YCbCr_420_888};
+    staticInfo.update(ANDROID_SCALER_AVAILABLE_INPUT_OUTPUT_FORMATS_MAP,
+                      io_format_map, sizeof(io_format_map)/sizeof(io_format_map[0]));
+
+    int32_t max_latency = (limitedDevice) ?
+            CAM_MAX_SYNC_LATENCY : ANDROID_SYNC_MAX_LATENCY_PER_FRAME_CONTROL;
+    staticInfo.update(ANDROID_SYNC_MAX_LATENCY,
+                      &max_latency,
+                      1);
+
+    uint8_t available_hot_pixel_modes[] = {ANDROID_HOT_PIXEL_MODE_FAST,
+                                           ANDROID_HOT_PIXEL_MODE_HIGH_QUALITY};
+    staticInfo.update(ANDROID_HOT_PIXEL_AVAILABLE_HOT_PIXEL_MODES,
+            available_hot_pixel_modes,
+            sizeof(available_hot_pixel_modes)/sizeof(available_hot_pixel_modes[0]));
+
+    uint8_t available_shading_modes[] = {ANDROID_SHADING_MODE_OFF,
+                                         ANDROID_SHADING_MODE_FAST,
+                                         ANDROID_SHADING_MODE_HIGH_QUALITY};
+    staticInfo.update(ANDROID_SHADING_AVAILABLE_MODES,
+                      available_shading_modes,
+                      3);
+
+    uint8_t available_lens_shading_map_modes[] = {ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF,
+                                                  ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_ON};
+    staticInfo.update(ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES,
+                      available_lens_shading_map_modes,
+                      2);
+
+    uint8_t available_edge_modes[] = {ANDROID_EDGE_MODE_OFF,
+                                      ANDROID_EDGE_MODE_FAST,
+                                      ANDROID_EDGE_MODE_HIGH_QUALITY,
+                                      ANDROID_EDGE_MODE_ZERO_SHUTTER_LAG};
+    staticInfo.update(ANDROID_EDGE_AVAILABLE_EDGE_MODES,
+            available_edge_modes,
+            sizeof(available_edge_modes)/sizeof(available_edge_modes[0]));
+
+    uint8_t available_noise_red_modes[] = {ANDROID_NOISE_REDUCTION_MODE_OFF,
+                                           ANDROID_NOISE_REDUCTION_MODE_FAST,
+                                           ANDROID_NOISE_REDUCTION_MODE_HIGH_QUALITY,
+                                           ANDROID_NOISE_REDUCTION_MODE_MINIMAL,
+                                           ANDROID_NOISE_REDUCTION_MODE_ZERO_SHUTTER_LAG};
+    staticInfo.update(ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES,
+            available_noise_red_modes,
+            sizeof(available_noise_red_modes)/sizeof(available_noise_red_modes[0]));
+
+    uint8_t available_tonemap_modes[] = {ANDROID_TONEMAP_MODE_CONTRAST_CURVE,
+                                         ANDROID_TONEMAP_MODE_FAST,
+                                         ANDROID_TONEMAP_MODE_HIGH_QUALITY};
+    staticInfo.update(ANDROID_TONEMAP_AVAILABLE_TONE_MAP_MODES,
+            available_tonemap_modes,
+            sizeof(available_tonemap_modes)/sizeof(available_tonemap_modes[0]));
+
+    uint8_t available_hot_pixel_map_modes[] = {ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF};
+    staticInfo.update(ANDROID_STATISTICS_INFO_AVAILABLE_HOT_PIXEL_MAP_MODES,
+            available_hot_pixel_map_modes,
+            sizeof(available_hot_pixel_map_modes)/sizeof(available_hot_pixel_map_modes[0]));
+
+    val = lookupFwkName(REFERENCE_ILLUMINANT_MAP, METADATA_MAP_SIZE(REFERENCE_ILLUMINANT_MAP),
+            gCamCapability[cameraId]->reference_illuminant1);
+    if (NAME_NOT_FOUND != val) {
+        uint8_t fwkReferenceIlluminant = (uint8_t)val;
+        staticInfo.update(ANDROID_SENSOR_REFERENCE_ILLUMINANT1, &fwkReferenceIlluminant, 1);
+    }
+
+    val = lookupFwkName(REFERENCE_ILLUMINANT_MAP, METADATA_MAP_SIZE(REFERENCE_ILLUMINANT_MAP),
+            gCamCapability[cameraId]->reference_illuminant2);
+    if (NAME_NOT_FOUND != val) {
+        uint8_t fwkReferenceIlluminant = (uint8_t)val;
+        staticInfo.update(ANDROID_SENSOR_REFERENCE_ILLUMINANT2, &fwkReferenceIlluminant, 1);
+    }
+
+    staticInfo.update(ANDROID_SENSOR_FORWARD_MATRIX1, (camera_metadata_rational_t *)
+            (void *)gCamCapability[cameraId]->forward_matrix1,
+            FORWARD_MATRIX_COLS * FORWARD_MATRIX_ROWS);
+
+    staticInfo.update(ANDROID_SENSOR_FORWARD_MATRIX2, (camera_metadata_rational_t *)
+            (void *)gCamCapability[cameraId]->forward_matrix2,
+            FORWARD_MATRIX_COLS * FORWARD_MATRIX_ROWS);
+
+    staticInfo.update(ANDROID_SENSOR_COLOR_TRANSFORM1, (camera_metadata_rational_t *)
+            (void *)gCamCapability[cameraId]->color_transform1,
+            COLOR_TRANSFORM_COLS * COLOR_TRANSFORM_ROWS);
+
+    staticInfo.update(ANDROID_SENSOR_COLOR_TRANSFORM2, (camera_metadata_rational_t *)
+            (void *)gCamCapability[cameraId]->color_transform2,
+            COLOR_TRANSFORM_COLS * COLOR_TRANSFORM_ROWS);
+
+    staticInfo.update(ANDROID_SENSOR_CALIBRATION_TRANSFORM1, (camera_metadata_rational_t *)
+            (void *)gCamCapability[cameraId]->calibration_transform1,
+            CAL_TRANSFORM_COLS * CAL_TRANSFORM_ROWS);
+
+    staticInfo.update(ANDROID_SENSOR_CALIBRATION_TRANSFORM2, (camera_metadata_rational_t *)
+            (void *)gCamCapability[cameraId]->calibration_transform2,
+            CAL_TRANSFORM_COLS * CAL_TRANSFORM_ROWS);
+
+    int32_t request_keys_basic[] = {ANDROID_COLOR_CORRECTION_MODE,
+       ANDROID_COLOR_CORRECTION_TRANSFORM, ANDROID_COLOR_CORRECTION_GAINS,
+       ANDROID_COLOR_CORRECTION_ABERRATION_MODE,
+       ANDROID_CONTROL_AE_ANTIBANDING_MODE, ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
+       ANDROID_CONTROL_AE_LOCK, ANDROID_CONTROL_AE_MODE,
+       ANDROID_CONTROL_AE_REGIONS, ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
+       ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, ANDROID_CONTROL_AF_MODE,
+       ANDROID_CONTROL_AF_TRIGGER, ANDROID_CONTROL_AWB_LOCK,
+       ANDROID_CONTROL_AWB_MODE, ANDROID_CONTROL_CAPTURE_INTENT,
+       ANDROID_CONTROL_EFFECT_MODE, ANDROID_CONTROL_MODE,
+       ANDROID_CONTROL_SCENE_MODE, ANDROID_CONTROL_VIDEO_STABILIZATION_MODE,
+       ANDROID_DEMOSAIC_MODE, ANDROID_EDGE_MODE,
+       ANDROID_FLASH_FIRING_POWER, ANDROID_FLASH_FIRING_TIME, ANDROID_FLASH_MODE,
+       ANDROID_JPEG_GPS_COORDINATES,
+       ANDROID_JPEG_GPS_PROCESSING_METHOD, ANDROID_JPEG_GPS_TIMESTAMP,
+       ANDROID_JPEG_ORIENTATION, ANDROID_JPEG_QUALITY, ANDROID_JPEG_THUMBNAIL_QUALITY,
+       ANDROID_JPEG_THUMBNAIL_SIZE, ANDROID_LENS_APERTURE, ANDROID_LENS_FILTER_DENSITY,
+       ANDROID_LENS_FOCAL_LENGTH, ANDROID_LENS_FOCUS_DISTANCE,
+       ANDROID_LENS_OPTICAL_STABILIZATION_MODE, ANDROID_NOISE_REDUCTION_MODE,
+       ANDROID_REQUEST_ID, ANDROID_REQUEST_TYPE,
+       ANDROID_SCALER_CROP_REGION, ANDROID_SENSOR_EXPOSURE_TIME,
+       ANDROID_SENSOR_FRAME_DURATION, ANDROID_HOT_PIXEL_MODE,
+       ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE,
+       ANDROID_SENSOR_SENSITIVITY, ANDROID_SHADING_MODE,
+       ANDROID_STATISTICS_FACE_DETECT_MODE,
+       ANDROID_STATISTICS_HISTOGRAM_MODE, ANDROID_STATISTICS_SHARPNESS_MAP_MODE,
+       ANDROID_STATISTICS_LENS_SHADING_MAP_MODE, ANDROID_TONEMAP_CURVE_BLUE,
+       ANDROID_TONEMAP_CURVE_GREEN, ANDROID_TONEMAP_CURVE_RED, ANDROID_TONEMAP_MODE,
+       ANDROID_BLACK_LEVEL_LOCK };
+
+    size_t request_keys_cnt =
+            sizeof(request_keys_basic)/sizeof(request_keys_basic[0]);
+    Vector<int32_t> available_request_keys;
+    available_request_keys.appendArray(request_keys_basic, request_keys_cnt);
+    if (gCamCapability[cameraId]->supported_focus_modes_cnt > 1) {
+        available_request_keys.add(ANDROID_CONTROL_AF_REGIONS);
+    }
+
+    staticInfo.update(ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS,
+            available_request_keys.array(), available_request_keys.size());
+
+    int32_t result_keys_basic[] = {ANDROID_COLOR_CORRECTION_TRANSFORM,
+       ANDROID_COLOR_CORRECTION_GAINS, ANDROID_CONTROL_AE_MODE, ANDROID_CONTROL_AE_REGIONS,
+       ANDROID_CONTROL_AE_STATE, ANDROID_CONTROL_AF_MODE,
+       ANDROID_CONTROL_AF_STATE, ANDROID_CONTROL_AWB_MODE,
+       ANDROID_CONTROL_AWB_STATE, ANDROID_CONTROL_MODE, ANDROID_EDGE_MODE,
+       ANDROID_FLASH_FIRING_POWER, ANDROID_FLASH_FIRING_TIME, ANDROID_FLASH_MODE,
+       ANDROID_FLASH_STATE, ANDROID_JPEG_GPS_COORDINATES, ANDROID_JPEG_GPS_PROCESSING_METHOD,
+       ANDROID_JPEG_GPS_TIMESTAMP, ANDROID_JPEG_ORIENTATION, ANDROID_JPEG_QUALITY,
+       ANDROID_JPEG_THUMBNAIL_QUALITY, ANDROID_JPEG_THUMBNAIL_SIZE, ANDROID_LENS_APERTURE,
+       ANDROID_LENS_FILTER_DENSITY, ANDROID_LENS_FOCAL_LENGTH, ANDROID_LENS_FOCUS_DISTANCE,
+       ANDROID_LENS_FOCUS_RANGE, ANDROID_LENS_STATE, ANDROID_LENS_OPTICAL_STABILIZATION_MODE,
+       ANDROID_NOISE_REDUCTION_MODE, ANDROID_REQUEST_ID,
+       ANDROID_SCALER_CROP_REGION, ANDROID_SHADING_MODE, ANDROID_SENSOR_EXPOSURE_TIME,
+       ANDROID_SENSOR_FRAME_DURATION, ANDROID_SENSOR_SENSITIVITY,
+       ANDROID_SENSOR_TIMESTAMP, ANDROID_SENSOR_NEUTRAL_COLOR_POINT,
+       ANDROID_SENSOR_PROFILE_TONE_CURVE, ANDROID_BLACK_LEVEL_LOCK, ANDROID_TONEMAP_CURVE_BLUE,
+       ANDROID_TONEMAP_CURVE_GREEN, ANDROID_TONEMAP_CURVE_RED, ANDROID_TONEMAP_MODE,
+       ANDROID_STATISTICS_FACE_DETECT_MODE, ANDROID_STATISTICS_HISTOGRAM_MODE,
+       ANDROID_STATISTICS_SHARPNESS_MAP, ANDROID_STATISTICS_SHARPNESS_MAP_MODE,
+       ANDROID_STATISTICS_PREDICTED_COLOR_GAINS, ANDROID_STATISTICS_PREDICTED_COLOR_TRANSFORM,
+       ANDROID_STATISTICS_SCENE_FLICKER, ANDROID_STATISTICS_FACE_RECTANGLES,
+       ANDROID_STATISTICS_FACE_SCORES,
+       ANDROID_SENSOR_DYNAMIC_BLACK_LEVEL,
+       ANDROID_SENSOR_DYNAMIC_WHITE_LEVEL};
+    size_t result_keys_cnt =
+            sizeof(result_keys_basic)/sizeof(result_keys_basic[0]);
+
+    Vector<int32_t> available_result_keys;
+    available_result_keys.appendArray(result_keys_basic, result_keys_cnt);
+    if (gCamCapability[cameraId]->supported_focus_modes_cnt > 1) {
+        available_result_keys.add(ANDROID_CONTROL_AF_REGIONS);
+    }
+    if (CAM_SENSOR_YUV != gCamCapability[cameraId]->sensor_type.sens_type) {
+       available_result_keys.add(ANDROID_SENSOR_NOISE_PROFILE);
+       available_result_keys.add(ANDROID_SENSOR_GREEN_SPLIT);
+    }
+    if (supportedFaceDetectMode == 1) {
+        available_result_keys.add(ANDROID_STATISTICS_FACE_RECTANGLES);
+        available_result_keys.add(ANDROID_STATISTICS_FACE_SCORES);
+    } else if ((supportedFaceDetectMode == 2) ||
+            (supportedFaceDetectMode == 3)) {
+        available_result_keys.add(ANDROID_STATISTICS_FACE_IDS);
+        available_result_keys.add(ANDROID_STATISTICS_FACE_LANDMARKS);
+    }
+    staticInfo.update(ANDROID_REQUEST_AVAILABLE_RESULT_KEYS,
+            available_result_keys.array(), available_result_keys.size());
+
+    int32_t characteristics_keys_basic[] = {ANDROID_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES,
+       ANDROID_CONTROL_AE_AVAILABLE_MODES, ANDROID_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
+       ANDROID_CONTROL_AE_COMPENSATION_RANGE, ANDROID_CONTROL_AE_COMPENSATION_STEP,
+       ANDROID_CONTROL_AF_AVAILABLE_MODES, ANDROID_CONTROL_AVAILABLE_EFFECTS,
+       ANDROID_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES,
+       ANDROID_SCALER_CROPPING_TYPE,
+       ANDROID_SYNC_MAX_LATENCY,
+       ANDROID_SENSOR_INFO_TIMESTAMP_SOURCE,
+       ANDROID_CONTROL_AVAILABLE_SCENE_MODES,
+       ANDROID_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES,
+       ANDROID_CONTROL_AWB_AVAILABLE_MODES, ANDROID_CONTROL_MAX_REGIONS,
+       ANDROID_CONTROL_SCENE_MODE_OVERRIDES,ANDROID_FLASH_INFO_AVAILABLE,
+       ANDROID_FLASH_INFO_CHARGE_DURATION, ANDROID_JPEG_AVAILABLE_THUMBNAIL_SIZES,
+       ANDROID_JPEG_MAX_SIZE, ANDROID_LENS_INFO_AVAILABLE_APERTURES,
+       ANDROID_LENS_INFO_AVAILABLE_FILTER_DENSITIES,
+       ANDROID_LENS_INFO_AVAILABLE_FOCAL_LENGTHS,
+       ANDROID_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION,
+       ANDROID_LENS_INFO_HYPERFOCAL_DISTANCE, ANDROID_LENS_INFO_MINIMUM_FOCUS_DISTANCE,
+       ANDROID_LENS_INFO_SHADING_MAP_SIZE, ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION,
+       ANDROID_LENS_FACING,
+       ANDROID_REQUEST_MAX_NUM_OUTPUT_STREAMS, ANDROID_REQUEST_MAX_NUM_INPUT_STREAMS,
+       ANDROID_REQUEST_PIPELINE_MAX_DEPTH, ANDROID_REQUEST_AVAILABLE_CAPABILITIES,
+       ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS, ANDROID_REQUEST_AVAILABLE_RESULT_KEYS,
+       ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, ANDROID_REQUEST_PARTIAL_RESULT_COUNT,
+       ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM,
+       ANDROID_SCALER_AVAILABLE_INPUT_OUTPUT_FORMATS_MAP,
+       ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
+       /*ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,*/
+       ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS, ANDROID_SENSOR_FORWARD_MATRIX1,
+       ANDROID_SENSOR_REFERENCE_ILLUMINANT1, ANDROID_SENSOR_REFERENCE_ILLUMINANT2,
+       ANDROID_SENSOR_FORWARD_MATRIX2, ANDROID_SENSOR_COLOR_TRANSFORM1,
+       ANDROID_SENSOR_COLOR_TRANSFORM2, ANDROID_SENSOR_CALIBRATION_TRANSFORM1,
+       ANDROID_SENSOR_CALIBRATION_TRANSFORM2, ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE,
+       ANDROID_SENSOR_INFO_SENSITIVITY_RANGE, ANDROID_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT,
+       ANDROID_SENSOR_INFO_EXPOSURE_TIME_RANGE, ANDROID_SENSOR_INFO_MAX_FRAME_DURATION,
+       ANDROID_SENSOR_INFO_PHYSICAL_SIZE, ANDROID_SENSOR_INFO_PIXEL_ARRAY_SIZE,
+       ANDROID_SENSOR_INFO_WHITE_LEVEL, ANDROID_SENSOR_BASE_GAIN_FACTOR,
+       ANDROID_SENSOR_BLACK_LEVEL_PATTERN, ANDROID_SENSOR_MAX_ANALOG_SENSITIVITY,
+       ANDROID_SENSOR_ORIENTATION, ANDROID_SENSOR_AVAILABLE_TEST_PATTERN_MODES,
+       ANDROID_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES,
+       ANDROID_STATISTICS_INFO_HISTOGRAM_BUCKET_COUNT,
+       ANDROID_STATISTICS_INFO_MAX_FACE_COUNT, ANDROID_STATISTICS_INFO_MAX_HISTOGRAM_COUNT,
+       ANDROID_STATISTICS_INFO_MAX_SHARPNESS_MAP_VALUE,
+       ANDROID_STATISTICS_INFO_SHARPNESS_MAP_SIZE, ANDROID_HOT_PIXEL_AVAILABLE_HOT_PIXEL_MODES,
+       ANDROID_EDGE_AVAILABLE_EDGE_MODES,
+       ANDROID_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES,
+       ANDROID_TONEMAP_AVAILABLE_TONE_MAP_MODES,
+       ANDROID_STATISTICS_INFO_AVAILABLE_HOT_PIXEL_MAP_MODES,
+       ANDROID_TONEMAP_MAX_CURVE_POINTS,
+       ANDROID_CONTROL_AVAILABLE_MODES,
+       ANDROID_CONTROL_AE_LOCK_AVAILABLE,
+       ANDROID_CONTROL_AWB_LOCK_AVAILABLE,
+       ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES,
+       ANDROID_SHADING_AVAILABLE_MODES,
+       ANDROID_INFO_SUPPORTED_HARDWARE_LEVEL };
+
+    Vector<int32_t> available_characteristics_keys;
+    available_characteristics_keys.appendArray(characteristics_keys_basic,
+            sizeof(characteristics_keys_basic)/sizeof(int32_t));
+    if (hasBlackRegions) {
+        available_characteristics_keys.add(ANDROID_SENSOR_OPTICAL_BLACK_REGIONS);
+    }
+    staticInfo.update(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS,
+                      available_characteristics_keys.array(),
+                      available_characteristics_keys.size());
+
+    /* Available stall durations depend on the HW + SW and will differ across devices. */
+    /* Have to add entries for raw after implementation. */
+    int32_t stall_formats[] = {HAL_PIXEL_FORMAT_BLOB, ANDROID_SCALER_AVAILABLE_FORMATS_RAW16};
+    size_t stall_formats_count = sizeof(stall_formats)/sizeof(int32_t);
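+    /* Stall duration entries are (format, width, height, stall duration in ns)
+     * tuples, four int64 values each. */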
+
+    count = MIN(gCamCapability[cameraId]->picture_sizes_tbl_cnt, MAX_SIZES_CNT);
+    size_t raw_count = MIN(gCamCapability[cameraId]->supported_raw_dim_cnt,
+            MAX_SIZES_CNT);
+    size_t available_stall_size = count * 4;
+    int64_t available_stall_durations[available_stall_size];
+    idx = 0;
+    for (uint32_t j = 0; j < stall_formats_count; j++) {
+       if (stall_formats[j] == HAL_PIXEL_FORMAT_BLOB) {
+          for (uint32_t i = 0; i < count; i++) {
+             available_stall_durations[idx]   = stall_formats[j];
+             available_stall_durations[idx+1] = gCamCapability[cameraId]->picture_sizes_tbl[i].width;
+             available_stall_durations[idx+2] = gCamCapability[cameraId]->picture_sizes_tbl[i].height;
+             available_stall_durations[idx+3] = gCamCapability[cameraId]->jpeg_stall_durations[i];
+             idx+=4;
+          }
+       } else {
+          for (uint32_t i = 0; i < raw_count; i++) {
+             available_stall_durations[idx]   = stall_formats[j];
+             available_stall_durations[idx+1] = gCamCapability[cameraId]->raw_dim[i].width;
+             available_stall_durations[idx+2] = gCamCapability[cameraId]->raw_dim[i].height;
+             available_stall_durations[idx+3] = gCamCapability[cameraId]->raw16_stall_durations[i];
+             idx+=4;
+          }
+       }
+    }
+    staticInfo.update(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
+                      available_stall_durations,
+                      idx);
+    //QCAMERA3_OPAQUE_RAW
+    uint8_t raw_format = QCAMERA3_OPAQUE_RAW_FORMAT_LEGACY;
+    cam_format_t fmt = CAM_FORMAT_BAYER_QCOM_RAW_10BPP_GBRG;
+    switch (gCamCapability[cameraId]->opaque_raw_fmt) {
+    case LEGACY_RAW:
+        if (gCamCapability[cameraId]->white_level == MAX_VALUE_8BIT)
+            fmt = CAM_FORMAT_BAYER_QCOM_RAW_8BPP_GBRG;
+        else if (gCamCapability[cameraId]->white_level == MAX_VALUE_10BIT)
+            fmt = CAM_FORMAT_BAYER_QCOM_RAW_10BPP_GBRG;
+        else if (gCamCapability[cameraId]->white_level == MAX_VALUE_12BIT)
+            fmt = CAM_FORMAT_BAYER_QCOM_RAW_12BPP_GBRG;
+        raw_format = QCAMERA3_OPAQUE_RAW_FORMAT_LEGACY;
+        break;
+    case MIPI_RAW:
+        if (gCamCapability[cameraId]->white_level == MAX_VALUE_8BIT)
+            fmt = CAM_FORMAT_BAYER_MIPI_RAW_8BPP_GBRG;
+        else if (gCamCapability[cameraId]->white_level == MAX_VALUE_10BIT)
+            fmt = CAM_FORMAT_BAYER_MIPI_RAW_10BPP_GBRG;
+        else if (gCamCapability[cameraId]->white_level == MAX_VALUE_12BIT)
+            fmt = CAM_FORMAT_BAYER_MIPI_RAW_12BPP_GBRG;
+        raw_format = QCAMERA3_OPAQUE_RAW_FORMAT_MIPI;
+        break;
+    default:
+        ALOGE("%s: unknown opaque_raw_format %d", __func__,
+                gCamCapability[cameraId]->opaque_raw_fmt);
+        break;
+    }
+    staticInfo.update(QCAMERA3_OPAQUE_RAW_FORMAT, &raw_format, 1);
+
+    int32_t strides[3*raw_count];
+    for (size_t i = 0; i < raw_count; i++) {
+        cam_stream_buf_plane_info_t buf_planes;
+        strides[i*3] = gCamCapability[cameraId]->raw_dim[i].width;
+        strides[i*3+1] = gCamCapability[cameraId]->raw_dim[i].height;
+        mm_stream_calc_offset_raw(fmt, &gCamCapability[cameraId]->raw_dim[i],
+            &gCamCapability[cameraId]->padding_info, &buf_planes);
+        strides[i*3+2] = buf_planes.plane_info.mp[0].stride;
+    }
+    staticInfo.update(QCAMERA3_OPAQUE_RAW_STRIDES, strides,
+            3*raw_count);
+
+    gStaticMetadata[cameraId] = staticInfo.release();
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : makeTable
+ *
+ * DESCRIPTION: make a flattened table of sizes
+ *
+ * PARAMETERS :
+ *   @dimTable  : table of dimensions from the backend
+ *   @size      : number of valid entries in dimTable
+ *   @max_size  : maximum number of entries to copy
+ *   @sizeTable : output array of (width, height) pairs
+ *
+ *==========================================================================*/
+void QCamera3HardwareInterface::makeTable(cam_dimension_t* dimTable, size_t size,
+        size_t max_size, int32_t *sizeTable)
+{
+    size_t j = 0;
+    if (size > max_size) {
+       size = max_size;
+    }
+    for (size_t i = 0; i < size; i++) {
+        sizeTable[j] = dimTable[i].width;
+        sizeTable[j+1] = dimTable[i].height;
+        j+=2;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : makeFPSTable
+ *
+ * DESCRIPTION: make a flattened table of fps ranges
+ *
+ * PARAMETERS :
+ *   @fpsTable       : table of fps ranges from the backend
+ *   @size           : number of valid entries in fpsTable
+ *   @max_size       : maximum number of entries to copy
+ *   @fpsRangesTable : output array of (min_fps, max_fps) pairs
+ *
+ *==========================================================================*/
+void QCamera3HardwareInterface::makeFPSTable(cam_fps_range_t* fpsTable, size_t size,
+        size_t max_size, int32_t *fpsRangesTable)
+{
+    size_t j = 0;
+    if (size > max_size) {
+       size = max_size;
+    }
+    for (size_t i = 0; i < size; i++) {
+        fpsRangesTable[j] = (int32_t)fpsTable[i].min_fps;
+        fpsRangesTable[j+1] = (int32_t)fpsTable[i].max_fps;
+        j+=2;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : makeOverridesList
+ *
+ * DESCRIPTION: make a list of scene mode overrides
+ *
+ * PARAMETERS :
+ *   @overridesTable    : scene mode overrides from the backend
+ *   @size              : number of supported scene modes
+ *   @max_size          : maximum number of entries to process
+ *   @overridesList     : output list of (AE, AWB, AF) override triplets
+ *   @supported_indexes : backend indexes of the supported scene modes
+ *   @camera_id         : camera Id
+ *
+ *==========================================================================*/
+void QCamera3HardwareInterface::makeOverridesList(
+        cam_scene_mode_overrides_t* overridesTable, size_t size, size_t max_size,
+        uint8_t *overridesList, uint8_t *supported_indexes, uint32_t camera_id)
+{
+    /* The daemon gives a list of overrides for all scene modes.
+       However, we should send the framework only the overrides for the
+       scene modes it supports. */
+    size_t j = 0;
+    if (size > max_size) {
+       size = max_size;
+    }
+    size_t focus_count = CAM_FOCUS_MODE_MAX;
+    focus_count = MIN(gCamCapability[camera_id]->supported_focus_modes_cnt,
+            focus_count);
+    for (size_t i = 0; i < size; i++) {
+        bool supt = false;
+        size_t index = supported_indexes[i];
+        overridesList[j] = gCamCapability[camera_id]->flash_available ?
+                ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH : ANDROID_CONTROL_AE_MODE_ON;
+        int val = lookupFwkName(WHITE_BALANCE_MODES_MAP,
+                METADATA_MAP_SIZE(WHITE_BALANCE_MODES_MAP),
+                overridesTable[index].awb_mode);
+        if (NAME_NOT_FOUND != val) {
+            overridesList[j+1] = (uint8_t)val;
+        }
+        uint8_t focus_override = overridesTable[index].af_mode;
+        for (size_t k = 0; k < focus_count; k++) {
+           if (gCamCapability[camera_id]->supported_focus_modes[k] == focus_override) {
+              supt = true;
+              break;
+           }
+        }
+        if (supt) {
+            val = lookupFwkName(FOCUS_MODES_MAP, METADATA_MAP_SIZE(FOCUS_MODES_MAP),
+                    focus_override);
+            if (NAME_NOT_FOUND != val) {
+                overridesList[j+2] = (uint8_t)val;
+            }
+        } else {
+           overridesList[j+2] = ANDROID_CONTROL_AF_MODE_OFF;
+        }
+        j+=3;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : filterJpegSizes
+ *
+ * DESCRIPTION: Returns the supported JPEG sizes, i.e. the processed sizes
+ *              that the active array can be downscaled to within the maximum
+ *              downscale factor
+ *
+ * PARAMETERS :
+ *   @jpegSizes         : output array of (width, height) pairs
+ *   @processedSizes    : input array of processed (width, height) pairs
+ *   @processedSizesCnt : number of int32_t entries in processedSizes
+ *   @maxCount          : maximum number of int32_t entries to process
+ *   @active_array_size : active pixel array dimensions
+ *   @downscale_factor  : maximum supported downscale factor
+ *
+ * RETURN     : number of int32_t entries written to jpegSizes
+ *==========================================================================*/
+
+size_t QCamera3HardwareInterface::filterJpegSizes(int32_t *jpegSizes, int32_t *processedSizes,
+        size_t processedSizesCnt, size_t maxCount, cam_rect_t active_array_size,
+        uint8_t downscale_factor)
+{
+    if (0 == downscale_factor) {
+        downscale_factor = 1;
+    }
+
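+    /* For example, with a 4000x3000 active array and downscale_factor 4, only
+     * processed sizes of at least 1000x750 are advertised as JPEG sizes
+     * (hypothetical numbers for illustration). */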
+    int32_t min_width = active_array_size.width / downscale_factor;
+    int32_t min_height = active_array_size.height / downscale_factor;
+    size_t jpegSizesCnt = 0;
+    if (processedSizesCnt > maxCount) {
+        processedSizesCnt = maxCount;
+    }
+    for (size_t i = 0; i < processedSizesCnt; i+=2) {
+        if (processedSizes[i] >= min_width && processedSizes[i+1] >= min_height) {
+            jpegSizes[jpegSizesCnt] = processedSizes[i];
+            jpegSizes[jpegSizesCnt+1] = processedSizes[i+1];
+            jpegSizesCnt += 2;
+        }
+    }
+    return jpegSizesCnt;
+}
+
+/*===========================================================================
+ * FUNCTION   : getScalarFormat
+ *
+ * DESCRIPTION: convert a backend format to a pixel format recognized by the
+ *              framework
+ *
+ * PARAMETERS :
+ *   @format : the format from the backend
+ *
+ * RETURN     : HAL pixel format recognized by the framework
+ *
+ *==========================================================================*/
+int32_t QCamera3HardwareInterface::getScalarFormat(int32_t format)
+{
+    int32_t halPixelFormat;
+
+    switch (format) {
+    case CAM_FORMAT_YUV_420_NV12:
+        halPixelFormat = HAL_PIXEL_FORMAT_YCbCr_420_SP;
+        break;
+    case CAM_FORMAT_YUV_420_NV21:
+        halPixelFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP;
+        break;
+    case CAM_FORMAT_YUV_420_NV21_ADRENO:
+        halPixelFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO;
+        break;
+    case CAM_FORMAT_YUV_420_YV12:
+        halPixelFormat = HAL_PIXEL_FORMAT_YV12;
+        break;
+    case CAM_FORMAT_YUV_422_NV16:
+    case CAM_FORMAT_YUV_422_NV61:
+    default:
+        halPixelFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP;
+        break;
+    }
+    return halPixelFormat;
+}
+
+/*===========================================================================
+ * FUNCTION   : computeNoiseModelEntryS
+ *
+ * DESCRIPTION: map a given sensitivity to the S parameter of the DNG noise
+ *              model.
+ *
+ * PARAMETERS :
+ *   @sens : the sensor sensitivity
+ *
+ * RETURN     : S (sensor amplification) noise model parameter
+ *
+ *==========================================================================*/
+double QCamera3HardwareInterface::computeNoiseModelEntryS(int32_t sens) {
+    double s = gCamCapability[mCameraId]->gradient_S * sens +
+            gCamCapability[mCameraId]->offset_S;
+    return ((s < 0.0) ? 0.0 : s);
+}
+
+/*===========================================================================
+ * FUNCTION   : computeNoiseModelEntryO
+ *
+ * DESCRIPTION: map a given sensitivity to the O parameter of the DNG noise
+ *              model.
+ *
+ * PARAMETERS :
+ *   @sens : the sensor sensitivity
+ *
+ * RETURN     : O (sensor readout) noise model parameter
+ *
+ *==========================================================================*/
+double QCamera3HardwareInterface::computeNoiseModelEntryO(int32_t sens) {
+    int32_t max_analog_sens = gCamCapability[mCameraId]->max_analog_sensitivity;
+    double digital_gain = (1.0 * sens / max_analog_sens) < 1.0 ?
+            1.0 : (1.0 * sens / max_analog_sens);
+    double o = gCamCapability[mCameraId]->gradient_O * sens * sens +
+            gCamCapability[mCameraId]->offset_O * digital_gain * digital_gain;
+    return ((o < 0.0) ? 0.0 : o);
+}
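+
+/* Note: S and O computed above feed android.sensor.noiseProfile, which (per
+ * the Android camera metadata documentation) models per-pixel sensor noise as
+ * N(x) = sqrt(S*x + O) for a pixel value x normalized to [0, 1]. */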
+
+/*===========================================================================
+ * FUNCTION   : getSensorSensitivity
+ *
+ * DESCRIPTION: convert iso_mode to an integer value
+ *
+ * PARAMETERS : iso_mode : the iso_mode supported by sensor
+ *
+ * RETURN     : sensitivity supported by sensor
+ *
+ *==========================================================================*/
+int32_t QCamera3HardwareInterface::getSensorSensitivity(int32_t iso_mode)
+{
+    int32_t sensitivity;
+
+    switch (iso_mode) {
+    case CAM_ISO_MODE_100:
+        sensitivity = 100;
+        break;
+    case CAM_ISO_MODE_200:
+        sensitivity = 200;
+        break;
+    case CAM_ISO_MODE_400:
+        sensitivity = 400;
+        break;
+    case CAM_ISO_MODE_800:
+        sensitivity = 800;
+        break;
+    case CAM_ISO_MODE_1600:
+        sensitivity = 1600;
+        break;
+    default:
+        sensitivity = -1;
+        break;
+    }
+    return sensitivity;
+}
+
+/*===========================================================================
+ * FUNCTION   : getCamInfo
+ *
+ * DESCRIPTION: query camera capabilities
+ *
+ * PARAMETERS :
+ *   @cameraId  : camera Id
+ *   @info      : camera info struct to be filled in with camera capabilities
+ *
+ * RETURN     : int type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3HardwareInterface::getCamInfo(uint32_t cameraId,
+        struct camera_info *info)
+{
+    ATRACE_CALL();
+    int rc = 0;
+
+    pthread_mutex_lock(&gCamLock);
+    if (NULL == gCamCapability[cameraId]) {
+        rc = initCapabilities(cameraId);
+        if (rc < 0) {
+            pthread_mutex_unlock(&gCamLock);
+            return rc;
+        }
+    }
+
+    if (NULL == gStaticMetadata[cameraId]) {
+        rc = initStaticMetadata(cameraId);
+        if (rc < 0) {
+            pthread_mutex_unlock(&gCamLock);
+            return rc;
+        }
+    }
+
+    switch(gCamCapability[cameraId]->position) {
+    case CAM_POSITION_BACK:
+        info->facing = CAMERA_FACING_BACK;
+        break;
+
+    case CAM_POSITION_FRONT:
+        info->facing = CAMERA_FACING_FRONT;
+        break;
+
+    default:
+        ALOGE("%s:Unknown position type for camera id:%d", __func__, cameraId);
+        rc = -1;
+        break;
+    }
+
+
+    info->orientation = (int)gCamCapability[cameraId]->sensor_mount_angle;
+    info->device_version = CAMERA_DEVICE_API_VERSION_3_3;
+    info->static_camera_characteristics = gStaticMetadata[cameraId];
+
+    //For now assume both cameras can operate independently.
+    info->conflicting_devices = NULL;
+    info->conflicting_devices_length = 0;
+
+    //resource cost is 100 * MIN(1.0, m/M),
+    //where m is throughput requirement with maximum stream configuration
+    //and M is CPP maximum throughput.
+    float max_fps = 0.0;
+    for (uint32_t i = 0;
+            i < gCamCapability[cameraId]->fps_ranges_tbl_cnt; i++) {
+        if (max_fps < gCamCapability[cameraId]->fps_ranges_tbl[i].max_fps)
+            max_fps = gCamCapability[cameraId]->fps_ranges_tbl[i].max_fps;
+    }
+    float ratio = 1.0 * MAX_PROCESSED_STREAMS *
+            gCamCapability[cameraId]->active_array_size.width *
+            gCamCapability[cameraId]->active_array_size.height * max_fps /
+            gCamCapability[cameraId]->max_pixel_bandwidth;
+    info->resource_cost = 100 * MIN(1.0, ratio);
+    ALOGI("%s: camera %d resource cost is %d", __func__, cameraId,
+            info->resource_cost);
+
+    pthread_mutex_unlock(&gCamLock);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : translateCapabilityToMetadata
+ *
+ * DESCRIPTION: translate the capability into camera_metadata_t
+ *
+ * PARAMETERS : type of the request
+ *
+ *
+ * RETURN     : success: camera_metadata_t*
+ *              failure: NULL
+ *
+ *==========================================================================*/
+camera_metadata_t* QCamera3HardwareInterface::translateCapabilityToMetadata(int type)
+{
+    if (mDefaultMetadata[type] != NULL) {
+        return mDefaultMetadata[type];
+    }
+    //first time we are handling this request
+    //fill up the metadata structure using the wrapper class
+    CameraMetadata settings;
+    //translate from cam_capability_t to camera_metadata_tag_t
+    static const uint8_t requestType = ANDROID_REQUEST_TYPE_CAPTURE;
+    settings.update(ANDROID_REQUEST_TYPE, &requestType, 1);
+    int32_t defaultRequestID = 0;
+    settings.update(ANDROID_REQUEST_ID, &defaultRequestID, 1);
+
+    /* OIS disable */
+    char ois_prop[PROPERTY_VALUE_MAX];
+    memset(ois_prop, 0, sizeof(ois_prop));
+    property_get("persist.camera.ois.disable", ois_prop, "0");
+    uint8_t ois_disable = (uint8_t)atoi(ois_prop);
+
+    /* Force video to use OIS */
+    char videoOisProp[PROPERTY_VALUE_MAX];
+    memset(videoOisProp, 0, sizeof(videoOisProp));
+    property_get("persist.camera.ois.video", videoOisProp, "1");
+    uint8_t forceVideoOis = (uint8_t)atoi(videoOisProp);
+
+    // EIS enable/disable
+    char eis_prop[PROPERTY_VALUE_MAX];
+    memset(eis_prop, 0, sizeof(eis_prop));
+    property_get("persist.camera.eis.enable", eis_prop, "0");
+    const uint8_t eis_prop_set = (uint8_t)atoi(eis_prop);
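+    // Usage sketch: these persist.camera.* properties can be toggled for testing
+    // with e.g. "adb shell setprop persist.camera.eis.enable 1", assuming a
+    // build where setting persist properties is permitted.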
+
+    const bool facingBack = gCamCapability[mCameraId]->position == CAM_POSITION_BACK;
+    // This is a bit hacky. EIS is enabled only when the above setprop
+    // is set to non-zero value and on back camera (for 2015 Nexus).
+    // Ideally, we should rely on m_bEisEnable, but we cannot guarantee
+    // configureStream is called before this function. In other words,
+    // we cannot guarantee the app will call configureStream before
+    // calling createDefaultRequest.
+    const bool eisEnabled = facingBack && eis_prop_set;
+
+    uint8_t controlIntent = 0;
+    uint8_t focusMode;
+    uint8_t vsMode;
+    uint8_t optStabMode;
+    uint8_t cacMode;
+    uint8_t edge_mode;
+    uint8_t noise_red_mode;
+    uint8_t tonemap_mode;
+    vsMode = ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_OFF;
+    optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+    switch (type) {
+      case CAMERA3_TEMPLATE_PREVIEW:
+        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_PREVIEW;
+        focusMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
+        optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON;
+        cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST;
+        edge_mode = ANDROID_EDGE_MODE_FAST;
+        noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_FAST;
+        tonemap_mode = ANDROID_TONEMAP_MODE_FAST;
+        break;
+      case CAMERA3_TEMPLATE_STILL_CAPTURE:
+        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_STILL_CAPTURE;
+        focusMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
+        optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON;
+        cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_HIGH_QUALITY;
+        edge_mode = ANDROID_EDGE_MODE_HIGH_QUALITY;
+        noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_HIGH_QUALITY;
+        tonemap_mode = ANDROID_TONEMAP_MODE_HIGH_QUALITY;
+        break;
+      case CAMERA3_TEMPLATE_VIDEO_RECORD:
+        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_RECORD;
+        focusMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
+        optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+        if (eisEnabled) {
+            vsMode = ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_ON;
+        }
+        cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST;
+        edge_mode = ANDROID_EDGE_MODE_FAST;
+        noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_FAST;
+        tonemap_mode = ANDROID_TONEMAP_MODE_FAST;
+        if (forceVideoOis)
+            optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON;
+        break;
+      case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
+        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_VIDEO_SNAPSHOT;
+        focusMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO;
+        optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+        if (eisEnabled) {
+            vsMode = ANDROID_CONTROL_VIDEO_STABILIZATION_MODE_ON;
+        }
+        cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST;
+        edge_mode = ANDROID_EDGE_MODE_FAST;
+        noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_FAST;
+        tonemap_mode = ANDROID_TONEMAP_MODE_FAST;
+        if (forceVideoOis)
+            optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON;
+        break;
+      case CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG:
+        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_ZERO_SHUTTER_LAG;
+        focusMode = ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE;
+        optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON;
+        cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST;
+        edge_mode = ANDROID_EDGE_MODE_ZERO_SHUTTER_LAG;
+        noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_ZERO_SHUTTER_LAG;
+        tonemap_mode = ANDROID_TONEMAP_MODE_FAST;
+        break;
+      case CAMERA3_TEMPLATE_MANUAL:
+        edge_mode = ANDROID_EDGE_MODE_FAST;
+        noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_FAST;
+        tonemap_mode = ANDROID_TONEMAP_MODE_FAST;
+        cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST;
+        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_MANUAL;
+        focusMode = ANDROID_CONTROL_AF_MODE_OFF;
+        optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+        break;
+      default:
+        edge_mode = ANDROID_EDGE_MODE_FAST;
+        noise_red_mode = ANDROID_NOISE_REDUCTION_MODE_FAST;
+        tonemap_mode = ANDROID_TONEMAP_MODE_FAST;
+        cacMode = ANDROID_COLOR_CORRECTION_ABERRATION_MODE_FAST;
+        controlIntent = ANDROID_CONTROL_CAPTURE_INTENT_CUSTOM;
+        optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+        break;
+    }
+    settings.update(ANDROID_COLOR_CORRECTION_ABERRATION_MODE, &cacMode, 1);
+    settings.update(ANDROID_CONTROL_CAPTURE_INTENT, &controlIntent, 1);
+    settings.update(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE, &vsMode, 1);
+    if (gCamCapability[mCameraId]->supported_focus_modes_cnt == 1) {
+        focusMode = ANDROID_CONTROL_AF_MODE_OFF;
+    }
+    settings.update(ANDROID_CONTROL_AF_MODE, &focusMode, 1);
+
+    if (gCamCapability[mCameraId]->optical_stab_modes_count == 1 &&
+            gCamCapability[mCameraId]->optical_stab_modes[0] == CAM_OPT_STAB_ON)
+        optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_ON;
+    else if ((gCamCapability[mCameraId]->optical_stab_modes_count == 1 &&
+            gCamCapability[mCameraId]->optical_stab_modes[0] == CAM_OPT_STAB_OFF)
+            || ois_disable)
+        optStabMode = ANDROID_LENS_OPTICAL_STABILIZATION_MODE_OFF;
+    settings.update(ANDROID_LENS_OPTICAL_STABILIZATION_MODE, &optStabMode, 1);
+
+    settings.update(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION,
+            &gCamCapability[mCameraId]->exposure_compensation_default, 1);
+
+    static const uint8_t aeLock = ANDROID_CONTROL_AE_LOCK_OFF;
+    settings.update(ANDROID_CONTROL_AE_LOCK, &aeLock, 1);
+
+    static const uint8_t awbLock = ANDROID_CONTROL_AWB_LOCK_OFF;
+    settings.update(ANDROID_CONTROL_AWB_LOCK, &awbLock, 1);
+
+    static const uint8_t awbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
+    settings.update(ANDROID_CONTROL_AWB_MODE, &awbMode, 1);
+
+    static const uint8_t controlMode = ANDROID_CONTROL_MODE_AUTO;
+    settings.update(ANDROID_CONTROL_MODE, &controlMode, 1);
+
+    static const uint8_t effectMode = ANDROID_CONTROL_EFFECT_MODE_OFF;
+    settings.update(ANDROID_CONTROL_EFFECT_MODE, &effectMode, 1);
+
+    static const uint8_t sceneMode = ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY;
+    settings.update(ANDROID_CONTROL_SCENE_MODE, &sceneMode, 1);
+
+    static const uint8_t aeMode = ANDROID_CONTROL_AE_MODE_ON;
+    settings.update(ANDROID_CONTROL_AE_MODE, &aeMode, 1);
+
+    /*flash*/
+    static const uint8_t flashMode = ANDROID_FLASH_MODE_OFF;
+    settings.update(ANDROID_FLASH_MODE, &flashMode, 1);
+
+    static const uint8_t flashFiringLevel = CAM_FLASH_FIRING_LEVEL_4;
+    settings.update(ANDROID_FLASH_FIRING_POWER,
+            &flashFiringLevel, 1);
+
+    /* lens */
+    float default_aperture = gCamCapability[mCameraId]->apertures[0];
+    settings.update(ANDROID_LENS_APERTURE, &default_aperture, 1);
+
+    if (gCamCapability[mCameraId]->filter_densities_count) {
+        float default_filter_density = gCamCapability[mCameraId]->filter_densities[0];
+        // The default request carries a single filter density value, so the
+        // entry count is 1; passing filter_densities_count here would read past
+        // the local variable when more than one density is supported.
+        settings.update(ANDROID_LENS_FILTER_DENSITY, &default_filter_density, 1);
+    }
+
+    float default_focal_length = gCamCapability[mCameraId]->focal_length;
+    settings.update(ANDROID_LENS_FOCAL_LENGTH, &default_focal_length, 1);
+
+    float default_focus_distance = 0;
+    settings.update(ANDROID_LENS_FOCUS_DISTANCE, &default_focus_distance, 1);
+
+    static const uint8_t demosaicMode = ANDROID_DEMOSAIC_MODE_FAST;
+    settings.update(ANDROID_DEMOSAIC_MODE, &demosaicMode, 1);
+
+    static const uint8_t hotpixelMode = ANDROID_HOT_PIXEL_MODE_FAST;
+    settings.update(ANDROID_HOT_PIXEL_MODE, &hotpixelMode, 1);
+
+    static const int32_t testpatternMode = ANDROID_SENSOR_TEST_PATTERN_MODE_OFF;
+    settings.update(ANDROID_SENSOR_TEST_PATTERN_MODE, &testpatternMode, 1);
+
+    /* face detection (default to OFF) */
+    static const uint8_t faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_OFF;
+    settings.update(ANDROID_STATISTICS_FACE_DETECT_MODE, &faceDetectMode, 1);
+
+    static const uint8_t histogramMode = ANDROID_STATISTICS_HISTOGRAM_MODE_OFF;
+    settings.update(ANDROID_STATISTICS_HISTOGRAM_MODE, &histogramMode, 1);
+
+    static const uint8_t sharpnessMapMode = ANDROID_STATISTICS_SHARPNESS_MAP_MODE_OFF;
+    settings.update(ANDROID_STATISTICS_SHARPNESS_MAP_MODE, &sharpnessMapMode, 1);
+
+    static const uint8_t hotPixelMapMode = ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE_OFF;
+    settings.update(ANDROID_STATISTICS_HOT_PIXEL_MAP_MODE, &hotPixelMapMode, 1);
+
+    static const uint8_t lensShadingMode = ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF;
+    settings.update(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE, &lensShadingMode, 1);
+
+    static const uint8_t blackLevelLock = ANDROID_BLACK_LEVEL_LOCK_OFF;
+    settings.update(ANDROID_BLACK_LEVEL_LOCK, &blackLevelLock, 1);
+
+    /* Exposure time (default to the minimum supported exposure time) */
+    int64_t default_exposure_time = gCamCapability[mCameraId]->exposure_time_range[0];
+    settings.update(ANDROID_SENSOR_EXPOSURE_TIME, &default_exposure_time, 1);
+
+    /* frame duration */
+    static const int64_t default_frame_duration = NSEC_PER_33MSEC;
+    settings.update(ANDROID_SENSOR_FRAME_DURATION, &default_frame_duration, 1);
+
+    /* sensitivity */
+    static const int32_t default_sensitivity = 100;
+    settings.update(ANDROID_SENSOR_SENSITIVITY, &default_sensitivity, 1);
+
+    /*edge mode*/
+    settings.update(ANDROID_EDGE_MODE, &edge_mode, 1);
+
+    /*noise reduction mode*/
+    settings.update(ANDROID_NOISE_REDUCTION_MODE, &noise_red_mode, 1);
+
+    /*color correction mode*/
+    static const uint8_t color_correct_mode = ANDROID_COLOR_CORRECTION_MODE_FAST;
+    settings.update(ANDROID_COLOR_CORRECTION_MODE, &color_correct_mode, 1);
+
+    /*tonemap mode*/
+    settings.update(ANDROID_TONEMAP_MODE, &tonemap_mode, 1);
+
+    int32_t scaler_crop_region[4];
+    scaler_crop_region[0] = 0;
+    scaler_crop_region[1] = 0;
+    scaler_crop_region[2] = gCamCapability[mCameraId]->active_array_size.width;
+    scaler_crop_region[3] = gCamCapability[mCameraId]->active_array_size.height;
+    settings.update(ANDROID_SCALER_CROP_REGION, scaler_crop_region, 4);
+
+    static const uint8_t antibanding_mode = ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO;
+    settings.update(ANDROID_CONTROL_AE_ANTIBANDING_MODE, &antibanding_mode, 1);
+
+    /*focus distance*/
+    float focus_distance = 0.0;
+    settings.update(ANDROID_LENS_FOCUS_DISTANCE, &focus_distance, 1);
+
+    /*target fps range: use maximum range for picture, and maximum fixed range for video*/
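+    /* For instance, with a hypothetical fps table of [15,30], [30,30] and
+     * [7.5,30]: the preview/still/ZSL templates pick [7.5,30] (widest span),
+     * while the remaining templates (e.g. video record) pick [30,30], the
+     * fixed-fps entry with the highest max fps. */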
+    float max_range = 0.0;
+    float max_fixed_fps = 0.0;
+    int32_t fps_range[2] = {0, 0};
+    for (uint32_t i = 0; i < gCamCapability[mCameraId]->fps_ranges_tbl_cnt;
+            i++) {
+        float range = gCamCapability[mCameraId]->fps_ranges_tbl[i].max_fps -
+            gCamCapability[mCameraId]->fps_ranges_tbl[i].min_fps;
+        if (type == CAMERA3_TEMPLATE_PREVIEW ||
+                type == CAMERA3_TEMPLATE_STILL_CAPTURE ||
+                type == CAMERA3_TEMPLATE_ZERO_SHUTTER_LAG) {
+            if (range > max_range) {
+                fps_range[0] =
+                    (int32_t)gCamCapability[mCameraId]->fps_ranges_tbl[i].min_fps;
+                fps_range[1] =
+                    (int32_t)gCamCapability[mCameraId]->fps_ranges_tbl[i].max_fps;
+                max_range = range;
+            }
+        } else {
+            if (range < 0.01 && max_fixed_fps <
+                    gCamCapability[mCameraId]->fps_ranges_tbl[i].max_fps) {
+                fps_range[0] =
+                    (int32_t)gCamCapability[mCameraId]->fps_ranges_tbl[i].min_fps;
+                fps_range[1] =
+                    (int32_t)gCamCapability[mCameraId]->fps_ranges_tbl[i].max_fps;
+                max_fixed_fps = gCamCapability[mCameraId]->fps_ranges_tbl[i].max_fps;
+            }
+        }
+    }
+    settings.update(ANDROID_CONTROL_AE_TARGET_FPS_RANGE, fps_range, 2);
+
+    /*precapture trigger*/
+    uint8_t precapture_trigger = ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE;
+    settings.update(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, &precapture_trigger, 1);
+
+    /*af trigger*/
+    uint8_t af_trigger = ANDROID_CONTROL_AF_TRIGGER_IDLE;
+    settings.update(ANDROID_CONTROL_AF_TRIGGER, &af_trigger, 1);
+
+    /* ae & af regions */
+    int32_t active_region[] = {
+            gCamCapability[mCameraId]->active_array_size.left,
+            gCamCapability[mCameraId]->active_array_size.top,
+            gCamCapability[mCameraId]->active_array_size.left +
+                    gCamCapability[mCameraId]->active_array_size.width,
+            gCamCapability[mCameraId]->active_array_size.top +
+                    gCamCapability[mCameraId]->active_array_size.height,
+            0};
+    settings.update(ANDROID_CONTROL_AE_REGIONS, active_region,
+            sizeof(active_region) / sizeof(active_region[0]));
+    settings.update(ANDROID_CONTROL_AF_REGIONS, active_region,
+            sizeof(active_region) / sizeof(active_region[0]));
+
+    /* black level lock */
+    uint8_t blacklevel_lock = ANDROID_BLACK_LEVEL_LOCK_OFF;
+    settings.update(ANDROID_BLACK_LEVEL_LOCK, &blacklevel_lock, 1);
+
+    /* lens shading map mode */
+    uint8_t shadingmap_mode = ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF;
+    if (CAM_SENSOR_RAW == gCamCapability[mCameraId]->sensor_type.sens_type) {
+        shadingmap_mode = ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_ON;
+    }
+    settings.update(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE, &shadingmap_mode, 1);
+
+    //special defaults for manual template
+    if (type == CAMERA3_TEMPLATE_MANUAL) {
+        static const uint8_t manualControlMode = ANDROID_CONTROL_MODE_OFF;
+        settings.update(ANDROID_CONTROL_MODE, &manualControlMode, 1);
+
+        static const uint8_t manualFocusMode = ANDROID_CONTROL_AF_MODE_OFF;
+        settings.update(ANDROID_CONTROL_AF_MODE, &manualFocusMode, 1);
+
+        static const uint8_t manualAeMode = ANDROID_CONTROL_AE_MODE_OFF;
+        settings.update(ANDROID_CONTROL_AE_MODE, &manualAeMode, 1);
+
+        static const uint8_t manualAwbMode = ANDROID_CONTROL_AWB_MODE_OFF;
+        settings.update(ANDROID_CONTROL_AWB_MODE, &manualAwbMode, 1);
+
+        static const uint8_t manualTonemapMode = ANDROID_TONEMAP_MODE_FAST;
+        settings.update(ANDROID_TONEMAP_MODE, &manualTonemapMode, 1);
+
+        static const uint8_t manualColorCorrectMode = ANDROID_COLOR_CORRECTION_MODE_TRANSFORM_MATRIX;
+        settings.update(ANDROID_COLOR_CORRECTION_MODE, &manualColorCorrectMode, 1);
+    }
+
+
+    /* TNR
+     * This is where we decide which templates have TNR enabled by default.
+     * TNR is turned on if either the preview or the video stream requires it.
+     * This is not to be confused with per-stream linking; that decision is
+     * made on a per-session basis and is handled as part of stream
+     * configuration.
+     */
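+    /* For example, if either m_bTnrPreview or m_bTnrVideo is set, the video
+     * record and video snapshot templates below get
+     * QCAMERA3_TEMPORAL_DENOISE_ENABLE = 1, while every other template keeps it
+     * at 0, all with the same process plate. */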
+    uint8_t tnr_enable = 0;
+
+    if (m_bTnrPreview || m_bTnrVideo) {
+
+        switch (type) {
+            case CAMERA3_TEMPLATE_VIDEO_RECORD:
+            case CAMERA3_TEMPLATE_VIDEO_SNAPSHOT:
+                    tnr_enable = 1;
+                    break;
+
+            default:
+                    tnr_enable = 0;
+                    break;
+        }
+
+        int32_t tnr_process_type = (int32_t)getTemporalDenoiseProcessPlate();
+        settings.update(QCAMERA3_TEMPORAL_DENOISE_ENABLE, &tnr_enable, 1);
+        settings.update(QCAMERA3_TEMPORAL_DENOISE_PROCESS_TYPE, &tnr_process_type, 1);
+
+        CDBG("%s: TNR:%d with process plate %d for template:%d",
+                            __func__, tnr_enable, tnr_process_type, type);
+    }
+
+    /* CDS default */
+    char prop[PROPERTY_VALUE_MAX];
+    memset(prop, 0, sizeof(prop));
+    property_get("persist.camera.CDS", prop, "Auto");
+    cam_cds_mode_type_t cds_mode = CAM_CDS_MODE_AUTO;
+    cds_mode = lookupProp(CDS_MAP, METADATA_MAP_SIZE(CDS_MAP), prop);
+    if (CAM_CDS_MODE_MAX == cds_mode) {
+        cds_mode = CAM_CDS_MODE_AUTO;
+    }
+    m_CdsPreference = cds_mode;
+
+    /* Disabling CDS in templates which have TNR enabled*/
+    if (tnr_enable)
+        cds_mode = CAM_CDS_MODE_OFF;
+
+    int32_t mode = cds_mode;
+    settings.update(QCAMERA3_CDS_MODE, &mode, 1);
+    mDefaultMetadata[type] = settings.release();
+
+    return mDefaultMetadata[type];
+}
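+
+/* Usage sketch: the cached defaults built above are what the framework receives
+ * when it asks for a request template, typically surfaced through the camera3
+ * construct_default_request_settings entry point; repeated calls for the same
+ * template type simply return the cached mDefaultMetadata[type] pointer. */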
+
+/*===========================================================================
+ * FUNCTION   : setFrameParameters
+ *
+ * DESCRIPTION: set per-frame parameters as requested in the metadata from
+ *              the framework
+ *
+ * PARAMETERS :
+ *   @request   : request that needs to be serviced
+ *   @streamID  : Stream ID of all the requested streams
+ *   @blob_request: Whether this request is a blob request or not
+ *   @snapshotStreamId: Stream ID of the snapshot stream
+ *
+ * RETURN     : success: NO_ERROR
+ *              failure: non-zero error code (e.g. BAD_VALUE)
+ *==========================================================================*/
+int QCamera3HardwareInterface::setFrameParameters(
+                    camera3_capture_request_t *request,
+                    cam_stream_ID_t streamID,
+                    int blob_request,
+                    uint32_t snapshotStreamId)
+{
+    /*translate from camera_metadata_t type to parm_type_t*/
+    int rc = 0;
+    int32_t hal_version = CAM_HAL_V3;
+
+    clear_metadata_buffer(mParameters);
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_PARM_HAL_VERSION, hal_version)) {
+        ALOGE("%s: Failed to set hal version in the parameters", __func__);
+        return BAD_VALUE;
+    }
+
+    /*we need to update the frame number in the parameters*/
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_META_FRAME_NUMBER,
+            request->frame_number)) {
+        ALOGE("%s: Failed to set the frame number in the parameters", __func__);
+        return BAD_VALUE;
+    }
+
+    /* Update stream id of all the requested buffers */
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_META_STREAM_ID, streamID)) {
+        ALOGE("%s: Failed to set stream type mask in the parameters", __func__);
+        return BAD_VALUE;
+    }
+
+    if (mUpdateDebugLevel) {
+        uint32_t dummyDebugLevel = 0;
+        /* The value of dummyDebugLevel is irrelevant; the debug property is
+         * re-read when CAM_INTF_PARM_UPDATE_DEBUG_LEVEL is set */
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_PARM_UPDATE_DEBUG_LEVEL,
+                dummyDebugLevel)) {
+            ALOGE("%s: Failed to set UPDATE_DEBUG_LEVEL", __func__);
+            return BAD_VALUE;
+        }
+        mUpdateDebugLevel = false;
+    }
+
+    if(request->settings != NULL){
+        rc = translateToHalMetadata(request, mParameters, snapshotStreamId);
+        if (blob_request)
+            memcpy(mPrevParameters, mParameters, sizeof(metadata_buffer_t));
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setReprocParameters
+ *
+ * DESCRIPTION: Translate framework metadata for a reprocess request into the
+ *              HAL metadata buffer
+ *
+ * PARAMETERS :
+ *   @request   : request that needs to be serviced
+ *   @reprocParam: HAL metadata buffer to be filled in
+ *   @snapshotStreamId: Stream ID of the snapshot stream
+ *
+ * RETURN     : success: NO_ERROR
+ *              failure: non-zero error code (e.g. BAD_VALUE)
+ *==========================================================================*/
+int32_t QCamera3HardwareInterface::setReprocParameters(
+        camera3_capture_request_t *request, metadata_buffer_t *reprocParam,
+        uint32_t snapshotStreamId)
+{
+    /*translate from camera_metadata_t type to parm_type_t*/
+    int rc = 0;
+
+    if (NULL == request->settings){
+        ALOGE("%s: Reprocess settings cannot be NULL", __func__);
+        return BAD_VALUE;
+    }
+
+    if (NULL == reprocParam) {
+        ALOGE("%s: Invalid reprocessing metadata buffer", __func__);
+        return BAD_VALUE;
+    }
+    clear_metadata_buffer(reprocParam);
+
+    /*we need to update the frame number in the parameters*/
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(reprocParam, CAM_INTF_META_FRAME_NUMBER,
+            request->frame_number)) {
+        ALOGE("%s: Failed to set the frame number in the parameters", __func__);
+        return BAD_VALUE;
+    }
+
+    rc = translateToHalMetadata(request, reprocParam, snapshotStreamId);
+    if (rc < 0) {
+        ALOGE("%s: Failed to translate reproc request", __func__);
+        return rc;
+    }
+
+    CameraMetadata frame_settings;
+    frame_settings = request->settings;
+    if (frame_settings.exists(QCAMERA3_CROP_COUNT_REPROCESS) &&
+            frame_settings.exists(QCAMERA3_CROP_REPROCESS)) {
+        int32_t *crop_count =
+                frame_settings.find(QCAMERA3_CROP_COUNT_REPROCESS).data.i32;
+        int32_t *crop_data =
+                frame_settings.find(QCAMERA3_CROP_REPROCESS).data.i32;
+        int32_t *roi_map =
+                frame_settings.find(QCAMERA3_CROP_ROI_MAP_REPROCESS).data.i32;
+        if ((0 < *crop_count) && (*crop_count < MAX_NUM_STREAMS)) {
+            cam_crop_data_t crop_meta;
+            memset(&crop_meta, 0, sizeof(cam_crop_data_t));
+            crop_meta.num_of_streams = 1;
+            crop_meta.crop_info[0].crop.left   = crop_data[0];
+            crop_meta.crop_info[0].crop.top    = crop_data[1];
+            crop_meta.crop_info[0].crop.width  = crop_data[2];
+            crop_meta.crop_info[0].crop.height = crop_data[3];
+
+            crop_meta.crop_info[0].roi_map.left =
+                    roi_map[0];
+            crop_meta.crop_info[0].roi_map.top =
+                    roi_map[1];
+            crop_meta.crop_info[0].roi_map.width =
+                    roi_map[2];
+            crop_meta.crop_info[0].roi_map.height =
+                    roi_map[3];
+
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(reprocParam, CAM_INTF_META_CROP_DATA, crop_meta)) {
+                rc = BAD_VALUE;
+            }
+            CDBG("%s: Found reprocess crop data for stream %p %dx%d, %dx%d",
+                    __func__,
+                    request->input_buffer->stream,
+                    crop_meta.crop_info[0].crop.left,
+                    crop_meta.crop_info[0].crop.top,
+                    crop_meta.crop_info[0].crop.width,
+                    crop_meta.crop_info[0].crop.height);
+            CDBG("%s: Found reprocess roi map data for stream %p %dx%d, %dx%d",
+                    __func__,
+                    request->input_buffer->stream,
+                    crop_meta.crop_info[0].roi_map.left,
+                    crop_meta.crop_info[0].roi_map.top,
+                    crop_meta.crop_info[0].roi_map.width,
+                    crop_meta.crop_info[0].roi_map.height);
+        } else {
+            ALOGE("%s: Invalid reprocess crop count %d!", __func__, *crop_count);
+        }
+    } else {
+        ALOGE("%s: No crop data from matching output stream", __func__);
+    }
+
+    /* These settings are not needed for regular requests so handle them specially for
+       reprocess requests; information needed for EXIF tags */
+    if (frame_settings.exists(ANDROID_FLASH_MODE)) {
+        int val = lookupHalName(FLASH_MODES_MAP, METADATA_MAP_SIZE(FLASH_MODES_MAP),
+                    (int)frame_settings.find(ANDROID_FLASH_MODE).data.u8[0]);
+        if (NAME_NOT_FOUND != val) {
+            uint32_t flashMode = (uint32_t)val;
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(reprocParam, CAM_INTF_META_FLASH_MODE, flashMode)) {
+                rc = BAD_VALUE;
+            }
+        } else {
+            ALOGE("%s: Could not map fwk flash mode %d to correct hal flash mode", __func__,
+                    frame_settings.find(ANDROID_FLASH_MODE).data.u8[0]);
+        }
+    } else {
+        CDBG_HIGH("%s: No flash mode in reprocess settings", __func__);
+    }
+
+    if (frame_settings.exists(ANDROID_FLASH_STATE)) {
+        int32_t flashState = (int32_t)frame_settings.find(ANDROID_FLASH_STATE).data.u8[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(reprocParam, CAM_INTF_META_FLASH_STATE, flashState)) {
+            rc = BAD_VALUE;
+        }
+    } else {
+        CDBG_HIGH("%s: No flash state in reprocess settings", __func__);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : saveRequestSettings
+ *
+ * DESCRIPTION: Add any settings that might have changed to the request settings
+ *              and save the settings to be applied on the frame
+ *
+ * PARAMETERS :
+ *   @jpegMetadata : the extracted and/or modified jpeg metadata
+ *   @request      : request with initial settings
+ *
+ * RETURN     :
+ * camera_metadata_t* : pointer to the saved request settings
+ *==========================================================================*/
+camera_metadata_t* QCamera3HardwareInterface::saveRequestSettings(
+        const CameraMetadata &jpegMetadata,
+        camera3_capture_request_t *request)
+{
+    camera_metadata_t *resultMetadata;
+    CameraMetadata camMetadata;
+    camMetadata = request->settings;
+
+    if (jpegMetadata.exists(ANDROID_JPEG_THUMBNAIL_SIZE)) {
+        int32_t thumbnail_size[2];
+        thumbnail_size[0] = jpegMetadata.find(ANDROID_JPEG_THUMBNAIL_SIZE).data.i32[0];
+        thumbnail_size[1] = jpegMetadata.find(ANDROID_JPEG_THUMBNAIL_SIZE).data.i32[1];
+        camMetadata.update(ANDROID_JPEG_THUMBNAIL_SIZE, thumbnail_size,
+                jpegMetadata.find(ANDROID_JPEG_THUMBNAIL_SIZE).count);
+    }
+
+    resultMetadata = camMetadata.release();
+    return resultMetadata;
+}
+
+/*===========================================================================
+ * FUNCTION   : setHalFpsRange
+ *
+ * DESCRIPTION: set FPS range parameter
+ *
+ *
+ * PARAMETERS :
+ *   @settings    : Metadata from framework
+ *   @hal_metadata: Metadata buffer
+ *
+ *
+ * RETURN     : success: NO_ERROR
+ *              failure: non-zero error code (e.g. BAD_VALUE)
+ *==========================================================================*/
+int32_t QCamera3HardwareInterface::setHalFpsRange(const CameraMetadata &settings,
+        metadata_buffer_t *hal_metadata)
+{
+    int32_t rc = NO_ERROR;
+    cam_fps_range_t fps_range;
+    fps_range.min_fps = (float)
+            settings.find(ANDROID_CONTROL_AE_TARGET_FPS_RANGE).data.i32[0];
+    fps_range.max_fps = (float)
+            settings.find(ANDROID_CONTROL_AE_TARGET_FPS_RANGE).data.i32[1];
+    fps_range.video_min_fps = fps_range.min_fps;
+    fps_range.video_max_fps = fps_range.max_fps;
+
+    CDBG("%s: aeTargetFpsRange fps: [%f %f]", __func__,
+            fps_range.min_fps, fps_range.max_fps);
+    /* In CONSTRAINED_HFR_MODE, sensor_fps is derived from aeTargetFpsRange as
+     * follows:
+     * ---------------------------------------------------------------|
+     *      Video stream is absent in configure_streams               |
+     *    (Camcorder preview before the first video record)           |
+     * ---------------------------------------------------------------|
+     * vid_buf_requested | aeTgtFpsRng | snsrFpsMode | sensorFpsRange |
+     *                   |             |             | vid_min/max_fps|
+     * ---------------------------------------------------------------|
+     *        NO         |  [ 30, 240] |     240     |  [240, 240]    |
+     *                   |-------------|-------------|----------------|
+     *                   |  [240, 240] |     240     |  [240, 240]    |
+     * ---------------------------------------------------------------|
+     *     Video stream is present in configure_streams               |
+     * ---------------------------------------------------------------|
+     * vid_buf_requested | aeTgtFpsRng | snsrFpsMode | sensorFpsRange |
+     *                   |             |             | vid_min/max_fps|
+     * ---------------------------------------------------------------|
+     *        NO         |  [ 30, 240] |     240     |  [240, 240]    |
+     * (camcorder prev   |-------------|-------------|----------------|
+     *  after video rec  |  [240, 240] |     240     |  [240, 240]    |
+     *  is stopped)      |             |             |                |
+     * ---------------------------------------------------------------|
+     *       YES         |  [ 30, 240] |     240     |  [240, 240]    |
+     *                   |-------------|-------------|----------------|
+     *                   |  [240, 240] |     240     |  [240, 240]    |
+     * ---------------------------------------------------------------|
+     * When Video stream is absent in configure_streams,
+     * preview fps = sensor_fps / batchsize
+     * Eg: for 240fps at batchSize 4, preview = 60fps
+     *     for 120fps at batchSize 4, preview = 30fps
+     *
+     * When a video stream is present in configure_streams, the preview fps
+     * follows from the ratio of preview buffers to video buffers requested in
+     * each process_capture_request.
+     */
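+    /* For example (consistent with the table above): a [120, 120] aeTargetFpsRange
+     * in constrained high-speed mode drives sensor and video fps to 120 and,
+     * with the 30 fps HFR preview implied by the examples above, a batch size
+     * of 120 / 30 = 4 (subject to the MAX_HFR_BATCH_SIZE cap below). */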
+    mBatchSize = 0;
+    if (CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE == mOpMode) {
+        fps_range.min_fps = fps_range.video_max_fps;
+        fps_range.video_min_fps = fps_range.video_max_fps;
+        int val = lookupHalName(HFR_MODE_MAP, METADATA_MAP_SIZE(HFR_MODE_MAP),
+                fps_range.max_fps);
+        if (NAME_NOT_FOUND != val) {
+            cam_hfr_mode_t hfrMode = (cam_hfr_mode_t)val;
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_HFR, hfrMode)) {
+                return BAD_VALUE;
+            }
+
+            if (fps_range.max_fps >= MIN_FPS_FOR_BATCH_MODE) {
+                /* If batchmode is currently in progress and the fps changes,
+                 * set the flag to restart the sensor */
+                if((mHFRVideoFps >= MIN_FPS_FOR_BATCH_MODE) &&
+                        (mHFRVideoFps != fps_range.max_fps)) {
+                    mNeedSensorRestart = true;
+                }
+                mHFRVideoFps = fps_range.max_fps;
+                mBatchSize = mHFRVideoFps / PREVIEW_FPS_FOR_HFR;
+                if (mBatchSize > MAX_HFR_BATCH_SIZE) {
+                    mBatchSize = MAX_HFR_BATCH_SIZE;
+                }
+             }
+            CDBG("%s: hfrMode: %d batchSize: %d", __func__, hfrMode, mBatchSize);
+
+         }
+    } else {
+        /* HFR mode is a session parameter in the backend/ISP. It should be
+         * reset when not operating in HFR mode */
+        cam_hfr_mode_t hfrMode = CAM_HFR_MODE_OFF;
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_HFR, hfrMode)) {
+            return BAD_VALUE;
+        }
+    }
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_FPS_RANGE, fps_range)) {
+        return BAD_VALUE;
+    }
+    CDBG("%s: fps: [%f %f] vid_fps: [%f %f]", __func__, fps_range.min_fps,
+            fps_range.max_fps, fps_range.video_min_fps, fps_range.video_max_fps);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : translateToHalMetadata
+ *
+ * DESCRIPTION: read from the camera_metadata_t and change to parm_type_t
+ *
+ *
+ * PARAMETERS :
+ *   @request  : request sent from framework
+ *   @hal_metadata: HAL metadata buffer to be filled in
+ *   @snapshotStreamId: Stream ID of the snapshot stream
+ *
+ * RETURN     : success: NO_ERROR
+ *              failure: non-zero error code (e.g. BAD_VALUE)
+ *==========================================================================*/
+int QCamera3HardwareInterface::translateToHalMetadata
+                                  (const camera3_capture_request_t *request,
+                                   metadata_buffer_t *hal_metadata,
+                                   uint32_t snapshotStreamId)
+{
+    int rc = 0;
+    CameraMetadata frame_settings;
+    frame_settings = request->settings;
+
+    /* Do not change the order of the following list unless you know what you are
+     * doing.
+     * The order is laid out in such a way that parameters in the front of the table
+     * may be used to override the parameters later in the table. Examples are:
+     * 1. META_MODE should precede AEC/AWB/AF MODE
+     * 2. AEC MODE should precede EXPOSURE_TIME/SENSITIVITY/FRAME_DURATION
+     * 3. AWB_MODE should precede COLOR_CORRECTION_MODE
+     * 4. Any mode should precede its corresponding settings
+     */
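+    /* As a concrete illustration of rule 2: ANDROID_CONTROL_AE_MODE is mapped
+     * to CAM_INTF_META_AEC_MODE just below, before the exposure time,
+     * sensitivity and frame duration entries are added further down, so the
+     * backend sees the AE mode before the manual values it gates. */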
+    if (frame_settings.exists(ANDROID_CONTROL_MODE)) {
+        uint8_t metaMode = frame_settings.find(ANDROID_CONTROL_MODE).data.u8[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_MODE, metaMode)) {
+            rc = BAD_VALUE;
+        }
+        rc = extractSceneMode(frame_settings, metaMode, hal_metadata);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: extractSceneMode failed", __func__);
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_CONTROL_AE_MODE)) {
+        uint8_t fwk_aeMode =
+            frame_settings.find(ANDROID_CONTROL_AE_MODE).data.u8[0];
+        uint8_t aeMode;
+        int32_t redeye;
+
+        if (fwk_aeMode == ANDROID_CONTROL_AE_MODE_OFF ) {
+            aeMode = CAM_AE_MODE_OFF;
+        } else {
+            aeMode = CAM_AE_MODE_ON;
+        }
+        if (fwk_aeMode == ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE) {
+            redeye = 1;
+        } else {
+            redeye = 0;
+        }
+
+        int val = lookupHalName(AE_FLASH_MODE_MAP, METADATA_MAP_SIZE(AE_FLASH_MODE_MAP),
+                fwk_aeMode);
+        if (NAME_NOT_FOUND != val) {
+            int32_t flashMode = (int32_t)val;
+            ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_LED_MODE, flashMode);
+        }
+
+        ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_AEC_MODE, aeMode);
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_REDEYE_REDUCTION, redeye)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_CONTROL_AWB_MODE)) {
+        uint8_t fwk_whiteLevel = frame_settings.find(ANDROID_CONTROL_AWB_MODE).data.u8[0];
+        int val = lookupHalName(WHITE_BALANCE_MODES_MAP, METADATA_MAP_SIZE(WHITE_BALANCE_MODES_MAP),
+                fwk_whiteLevel);
+        if (NAME_NOT_FOUND != val) {
+            uint8_t whiteLevel = (uint8_t)val;
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_WHITE_BALANCE, whiteLevel)) {
+                rc = BAD_VALUE;
+            }
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_COLOR_CORRECTION_ABERRATION_MODE)) {
+        uint8_t fwk_cacMode =
+                frame_settings.find(
+                        ANDROID_COLOR_CORRECTION_ABERRATION_MODE).data.u8[0];
+        int val = lookupHalName(COLOR_ABERRATION_MAP, METADATA_MAP_SIZE(COLOR_ABERRATION_MAP),
+                fwk_cacMode);
+        if (NAME_NOT_FOUND != val) {
+            cam_aberration_mode_t cacMode = (cam_aberration_mode_t) val;
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_CAC, cacMode)) {
+                rc = BAD_VALUE;
+            }
+        } else {
+            ALOGE("%s: Invalid framework CAC mode: %d", __func__, fwk_cacMode);
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_CONTROL_AF_MODE)) {
+        uint8_t fwk_focusMode = frame_settings.find(ANDROID_CONTROL_AF_MODE).data.u8[0];
+        int val = lookupHalName(FOCUS_MODES_MAP, METADATA_MAP_SIZE(FOCUS_MODES_MAP),
+                fwk_focusMode);
+        if (NAME_NOT_FOUND != val) {
+            uint8_t focusMode = (uint8_t)val;
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_FOCUS_MODE, focusMode)) {
+                rc = BAD_VALUE;
+            }
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_LENS_FOCUS_DISTANCE)) {
+        float focalDistance = frame_settings.find(ANDROID_LENS_FOCUS_DISTANCE).data.f[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_LENS_FOCUS_DISTANCE,
+                focalDistance)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_CONTROL_AE_ANTIBANDING_MODE)) {
+        uint8_t fwk_antibandingMode =
+                frame_settings.find(ANDROID_CONTROL_AE_ANTIBANDING_MODE).data.u8[0];
+        int val = lookupHalName(ANTIBANDING_MODES_MAP,
+                METADATA_MAP_SIZE(ANTIBANDING_MODES_MAP), fwk_antibandingMode);
+        if (NAME_NOT_FOUND != val) {
+            uint32_t hal_antibandingMode = (uint32_t)val;
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_ANTIBANDING,
+                    hal_antibandingMode)) {
+                rc = BAD_VALUE;
+            }
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION)) {
+        int32_t expCompensation = frame_settings.find(
+                ANDROID_CONTROL_AE_EXPOSURE_COMPENSATION).data.i32[0];
+        if (expCompensation < gCamCapability[mCameraId]->exposure_compensation_min)
+            expCompensation = gCamCapability[mCameraId]->exposure_compensation_min;
+        if (expCompensation > gCamCapability[mCameraId]->exposure_compensation_max)
+            expCompensation = gCamCapability[mCameraId]->exposure_compensation_max;
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_EXPOSURE_COMPENSATION,
+                expCompensation)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_CONTROL_AE_LOCK)) {
+        uint8_t aeLock = frame_settings.find(ANDROID_CONTROL_AE_LOCK).data.u8[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_AEC_LOCK, aeLock)) {
+            rc = BAD_VALUE;
+        }
+    }
+    if (frame_settings.exists(ANDROID_CONTROL_AE_TARGET_FPS_RANGE)) {
+        rc = setHalFpsRange(frame_settings, hal_metadata);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: setHalFpsRange failed", __func__);
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_CONTROL_AWB_LOCK)) {
+        uint8_t awbLock = frame_settings.find(ANDROID_CONTROL_AWB_LOCK).data.u8[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_AWB_LOCK, awbLock)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_CONTROL_EFFECT_MODE)) {
+        uint8_t fwk_effectMode = frame_settings.find(ANDROID_CONTROL_EFFECT_MODE).data.u8[0];
+        int val = lookupHalName(EFFECT_MODES_MAP, METADATA_MAP_SIZE(EFFECT_MODES_MAP),
+                fwk_effectMode);
+        if (NAME_NOT_FOUND != val) {
+            uint8_t effectMode = (uint8_t)val;
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_EFFECT, effectMode)) {
+                rc = BAD_VALUE;
+            }
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_COLOR_CORRECTION_MODE)) {
+        uint8_t colorCorrectMode = frame_settings.find(ANDROID_COLOR_CORRECTION_MODE).data.u8[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_COLOR_CORRECT_MODE,
+                colorCorrectMode)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_COLOR_CORRECTION_GAINS)) {
+        cam_color_correct_gains_t colorCorrectGains;
+        for (size_t i = 0; i < CC_GAINS_COUNT; i++) {
+            colorCorrectGains.gains[i] =
+                    frame_settings.find(ANDROID_COLOR_CORRECTION_GAINS).data.f[i];
+        }
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_COLOR_CORRECT_GAINS,
+                colorCorrectGains)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_COLOR_CORRECTION_TRANSFORM)) {
+        cam_color_correct_matrix_t colorCorrectTransform;
+        cam_rational_type_t transform_elem;
+        size_t num = 0;
+        for (size_t i = 0; i < CC_MATRIX_ROWS; i++) {
+           for (size_t j = 0; j < CC_MATRIX_COLS; j++) {
+              transform_elem.numerator =
+                 frame_settings.find(ANDROID_COLOR_CORRECTION_TRANSFORM).data.r[num].numerator;
+              transform_elem.denominator =
+                 frame_settings.find(ANDROID_COLOR_CORRECTION_TRANSFORM).data.r[num].denominator;
+              colorCorrectTransform.transform_matrix[i][j] = transform_elem;
+              num++;
+           }
+        }
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_COLOR_CORRECT_TRANSFORM,
+                colorCorrectTransform)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    cam_trigger_t aecTrigger;
+    aecTrigger.trigger = CAM_AEC_TRIGGER_IDLE;
+    aecTrigger.trigger_id = -1;
+    if (frame_settings.exists(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER)&&
+        frame_settings.exists(ANDROID_CONTROL_AE_PRECAPTURE_ID)) {
+        aecTrigger.trigger =
+            frame_settings.find(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER).data.u8[0];
+        aecTrigger.trigger_id =
+            frame_settings.find(ANDROID_CONTROL_AE_PRECAPTURE_ID).data.i32[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_AEC_PRECAPTURE_TRIGGER,
+                aecTrigger)) {
+            rc = BAD_VALUE;
+        }
+        CDBG("%s: precaptureTrigger: %d precaptureTriggerID: %d", __func__,
+                aecTrigger.trigger, aecTrigger.trigger_id);
+    }
+
+    /*af_trigger must come with a trigger id*/
+    if (frame_settings.exists(ANDROID_CONTROL_AF_TRIGGER) &&
+        frame_settings.exists(ANDROID_CONTROL_AF_TRIGGER_ID)) {
+        cam_trigger_t af_trigger;
+        af_trigger.trigger =
+            frame_settings.find(ANDROID_CONTROL_AF_TRIGGER).data.u8[0];
+        af_trigger.trigger_id =
+            frame_settings.find(ANDROID_CONTROL_AF_TRIGGER_ID).data.i32[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_AF_TRIGGER, af_trigger)) {
+            rc = BAD_VALUE;
+        }
+        CDBG("%s: AfTrigger: %d AfTriggerID: %d", __func__,
+                af_trigger.trigger, af_trigger.trigger_id);
+    }
+
+    if (frame_settings.exists(ANDROID_DEMOSAIC_MODE)) {
+        int32_t demosaic = frame_settings.find(ANDROID_DEMOSAIC_MODE).data.u8[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_DEMOSAIC, demosaic)) {
+            rc = BAD_VALUE;
+        }
+    }
+    if (frame_settings.exists(ANDROID_EDGE_MODE)) {
+        cam_edge_application_t edge_application;
+        edge_application.edge_mode = frame_settings.find(ANDROID_EDGE_MODE).data.u8[0];
+        if (edge_application.edge_mode == CAM_EDGE_MODE_OFF) {
+            edge_application.sharpness = 0;
+        } else {
+            edge_application.sharpness = gCamCapability[mCameraId]->sharpness_ctrl.def_value; //default
+        }
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_EDGE_MODE, edge_application)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_FLASH_MODE)) {
+        int32_t respectFlashMode = 1;
+        if (frame_settings.exists(ANDROID_CONTROL_AE_MODE)) {
+            uint8_t fwk_aeMode =
+                frame_settings.find(ANDROID_CONTROL_AE_MODE).data.u8[0];
+            if (fwk_aeMode > ANDROID_CONTROL_AE_MODE_ON) {
+                respectFlashMode = 0;
+                CDBG_HIGH("%s: AE Mode controls flash, ignore android.flash.mode",
+                    __func__);
+            }
+        }
+        if (respectFlashMode) {
+            int val = lookupHalName(FLASH_MODES_MAP, METADATA_MAP_SIZE(FLASH_MODES_MAP),
+                    (int)frame_settings.find(ANDROID_FLASH_MODE).data.u8[0]);
+            CDBG_HIGH("%s: flash mode after mapping %d", __func__, val);
+            // To check: CAM_INTF_META_FLASH_MODE usage
+            if (NAME_NOT_FOUND != val) {
+                uint8_t flashMode = (uint8_t)val;
+                if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_LED_MODE, flashMode)) {
+                    rc = BAD_VALUE;
+                }
+            }
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_FLASH_FIRING_POWER)) {
+        uint8_t flashPower = frame_settings.find(ANDROID_FLASH_FIRING_POWER).data.u8[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_FLASH_POWER, flashPower)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_FLASH_FIRING_TIME)) {
+        int64_t flashFiringTime = frame_settings.find(ANDROID_FLASH_FIRING_TIME).data.i64[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_FLASH_FIRING_TIME,
+                flashFiringTime)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_HOT_PIXEL_MODE)) {
+        uint8_t hotPixelMode = frame_settings.find(ANDROID_HOT_PIXEL_MODE).data.u8[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_HOTPIXEL_MODE,
+                hotPixelMode)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_LENS_APERTURE)) {
+        float lensAperture = frame_settings.find( ANDROID_LENS_APERTURE).data.f[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_LENS_APERTURE,
+                lensAperture)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_LENS_FILTER_DENSITY)) {
+        float filterDensity = frame_settings.find(ANDROID_LENS_FILTER_DENSITY).data.f[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_LENS_FILTERDENSITY,
+                filterDensity)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_LENS_FOCAL_LENGTH)) {
+        float focalLength = frame_settings.find(ANDROID_LENS_FOCAL_LENGTH).data.f[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_LENS_FOCAL_LENGTH,
+                focalLength)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_LENS_OPTICAL_STABILIZATION_MODE)) {
+        uint8_t optStabMode =
+                frame_settings.find(ANDROID_LENS_OPTICAL_STABILIZATION_MODE).data.u8[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_LENS_OPT_STAB_MODE,
+                optStabMode)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE)) {
+        uint8_t videoStabMode =
+                frame_settings.find(ANDROID_CONTROL_VIDEO_STABILIZATION_MODE).data.u8[0];
+        // Use the caller-supplied buffer, consistent with the rest of this
+        // function (hal_metadata may be the reprocess buffer, not mParameters).
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_VIDEO_STAB_MODE,
+                videoStabMode)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+
+    if (frame_settings.exists(ANDROID_NOISE_REDUCTION_MODE)) {
+        uint8_t noiseRedMode = frame_settings.find(ANDROID_NOISE_REDUCTION_MODE).data.u8[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_NOISE_REDUCTION_MODE,
+                noiseRedMode)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_REPROCESS_EFFECTIVE_EXPOSURE_FACTOR)) {
+        float reprocessEffectiveExposureFactor =
+            frame_settings.find(ANDROID_REPROCESS_EFFECTIVE_EXPOSURE_FACTOR).data.f[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_EFFECTIVE_EXPOSURE_FACTOR,
+                reprocessEffectiveExposureFactor)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    cam_crop_region_t scalerCropRegion;
+    bool scalerCropSet = false;
+    if (frame_settings.exists(ANDROID_SCALER_CROP_REGION)) {
+        scalerCropRegion.left = frame_settings.find(ANDROID_SCALER_CROP_REGION).data.i32[0];
+        scalerCropRegion.top = frame_settings.find(ANDROID_SCALER_CROP_REGION).data.i32[1];
+        scalerCropRegion.width = frame_settings.find(ANDROID_SCALER_CROP_REGION).data.i32[2];
+        scalerCropRegion.height = frame_settings.find(ANDROID_SCALER_CROP_REGION).data.i32[3];
+
+        // Map coordinate system from active array to sensor output.
+        mCropRegionMapper.toSensor(scalerCropRegion.left, scalerCropRegion.top,
+                scalerCropRegion.width, scalerCropRegion.height);
+
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_SCALER_CROP_REGION,
+                scalerCropRegion)) {
+            rc = BAD_VALUE;
+        }
+        scalerCropSet = true;
+    }
+
+    if (frame_settings.exists(ANDROID_SENSOR_EXPOSURE_TIME)) {
+        int64_t sensorExpTime =
+                frame_settings.find(ANDROID_SENSOR_EXPOSURE_TIME).data.i64[0];
+        CDBG("%s: setting sensorExpTime %lld", __func__, sensorExpTime);
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_SENSOR_EXPOSURE_TIME,
+                sensorExpTime)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_SENSOR_FRAME_DURATION)) {
+        int64_t sensorFrameDuration =
+                frame_settings.find(ANDROID_SENSOR_FRAME_DURATION).data.i64[0];
+        int64_t minFrameDuration = getMinFrameDuration(request);
+        sensorFrameDuration = MAX(sensorFrameDuration, minFrameDuration);
+        if (sensorFrameDuration > gCamCapability[mCameraId]->max_frame_duration)
+            sensorFrameDuration = gCamCapability[mCameraId]->max_frame_duration;
+        CDBG("%s: clamp sensorFrameDuration to %lld", __func__, sensorFrameDuration);
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_SENSOR_FRAME_DURATION,
+                sensorFrameDuration)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_SENSOR_SENSITIVITY)) {
+        int32_t sensorSensitivity = frame_settings.find(ANDROID_SENSOR_SENSITIVITY).data.i32[0];
+        if (sensorSensitivity < gCamCapability[mCameraId]->sensitivity_range.min_sensitivity)
+                sensorSensitivity = gCamCapability[mCameraId]->sensitivity_range.min_sensitivity;
+        if (sensorSensitivity > gCamCapability[mCameraId]->sensitivity_range.max_sensitivity)
+                sensorSensitivity = gCamCapability[mCameraId]->sensitivity_range.max_sensitivity;
+        CDBG("%s: clamp sensorSensitivity to %d", __func__, sensorSensitivity);
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_SENSOR_SENSITIVITY,
+                sensorSensitivity)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_SHADING_MODE)) {
+        uint8_t shadingMode = frame_settings.find(ANDROID_SHADING_MODE).data.u8[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_SHADING_MODE, shadingMode)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_STATISTICS_FACE_DETECT_MODE)) {
+        uint8_t fwk_facedetectMode =
+                frame_settings.find(ANDROID_STATISTICS_FACE_DETECT_MODE).data.u8[0];
+
+        int val = lookupHalName(FACEDETECT_MODES_MAP, METADATA_MAP_SIZE(FACEDETECT_MODES_MAP),
+                fwk_facedetectMode);
+
+        if (NAME_NOT_FOUND != val) {
+            uint8_t facedetectMode = (uint8_t)val;
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_STATS_FACEDETECT_MODE,
+                    facedetectMode)) {
+                rc = BAD_VALUE;
+            }
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_STATISTICS_HISTOGRAM_MODE)) {
+        uint8_t histogramMode =
+                frame_settings.find(ANDROID_STATISTICS_HISTOGRAM_MODE).data.u8[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_STATS_HISTOGRAM_MODE,
+                histogramMode)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_STATISTICS_SHARPNESS_MAP_MODE)) {
+        uint8_t sharpnessMapMode =
+                frame_settings.find(ANDROID_STATISTICS_SHARPNESS_MAP_MODE).data.u8[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_STATS_SHARPNESS_MAP_MODE,
+                sharpnessMapMode)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_TONEMAP_MODE)) {
+        uint8_t tonemapMode =
+                frame_settings.find(ANDROID_TONEMAP_MODE).data.u8[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_TONEMAP_MODE, tonemapMode)) {
+            rc = BAD_VALUE;
+        }
+    }
+    /* Tonemap curve channels ch0 = G, ch 1 = B, ch 2 = R */
+    /*All tonemap channels will have the same number of points*/
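+    /* Each curve is stored as interleaved (Pin, Pout) pairs, so a hypothetical
+     * 3-point identity curve {0.0, 0.0, 0.5, 0.5, 1.0, 1.0} yields
+     * tonemap_points_cnt = 6/2 = 3 with tonemap_points[1] = {0.5, 0.5}. */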
+    if (frame_settings.exists(ANDROID_TONEMAP_CURVE_GREEN) &&
+        frame_settings.exists(ANDROID_TONEMAP_CURVE_BLUE) &&
+        frame_settings.exists(ANDROID_TONEMAP_CURVE_RED)) {
+        cam_rgb_tonemap_curves tonemapCurves;
+        tonemapCurves.tonemap_points_cnt = frame_settings.find(ANDROID_TONEMAP_CURVE_GREEN).count/2;
+        if (tonemapCurves.tonemap_points_cnt > CAM_MAX_TONEMAP_CURVE_SIZE) {
+            ALOGE("%s: Fatal: tonemap_points_cnt %d exceeds max value of %d",
+                    __func__, tonemapCurves.tonemap_points_cnt,
+                    CAM_MAX_TONEMAP_CURVE_SIZE);
+            tonemapCurves.tonemap_points_cnt = CAM_MAX_TONEMAP_CURVE_SIZE;
+        }
+
+        /* ch0 = G*/
+        size_t point = 0;
+        cam_tonemap_curve_t tonemapCurveGreen;
+        for (size_t i = 0; i < tonemapCurves.tonemap_points_cnt; i++) {
+            for (size_t j = 0; j < 2; j++) {
+               tonemapCurveGreen.tonemap_points[i][j] =
+                  frame_settings.find(ANDROID_TONEMAP_CURVE_GREEN).data.f[point];
+               point++;
+            }
+        }
+        tonemapCurves.curves[0] = tonemapCurveGreen;
+
+        /* ch 1 = B */
+        point = 0;
+        cam_tonemap_curve_t tonemapCurveBlue;
+        for (size_t i = 0; i < tonemapCurves.tonemap_points_cnt; i++) {
+            for (size_t j = 0; j < 2; j++) {
+               tonemapCurveBlue.tonemap_points[i][j] =
+                  frame_settings.find(ANDROID_TONEMAP_CURVE_BLUE).data.f[point];
+               point++;
+            }
+        }
+        tonemapCurves.curves[1] = tonemapCurveBlue;
+
+        /* ch 2 = R */
+        point = 0;
+        cam_tonemap_curve_t tonemapCurveRed;
+        for (size_t i = 0; i < tonemapCurves.tonemap_points_cnt; i++) {
+            for (size_t j = 0; j < 2; j++) {
+               tonemapCurveRed.tonemap_points[i][j] =
+                  frame_settings.find(ANDROID_TONEMAP_CURVE_RED).data.f[point];
+               point++;
+            }
+        }
+        tonemapCurves.curves[2] = tonemapCurveRed;
+
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_TONEMAP_CURVES,
+                tonemapCurves)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_CONTROL_CAPTURE_INTENT)) {
+        uint8_t captureIntent = frame_settings.find(ANDROID_CONTROL_CAPTURE_INTENT).data.u8[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_CAPTURE_INTENT,
+                captureIntent)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_BLACK_LEVEL_LOCK)) {
+        uint8_t blackLevelLock = frame_settings.find(ANDROID_BLACK_LEVEL_LOCK).data.u8[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_BLACK_LEVEL_LOCK,
+                blackLevelLock)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE)) {
+        uint8_t lensShadingMapMode =
+                frame_settings.find(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE).data.u8[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_LENS_SHADING_MAP_MODE,
+                lensShadingMapMode)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_CONTROL_AE_REGIONS)) {
+        cam_area_t roi;
+        bool reset = true;
+        convertFromRegions(roi, request->settings, ANDROID_CONTROL_AE_REGIONS);
+
+        // Map coordinate system from active array to sensor output.
+        mCropRegionMapper.toSensor(roi.rect.left, roi.rect.top, roi.rect.width,
+                roi.rect.height);
+
+        if (scalerCropSet) {
+            reset = resetIfNeededROI(&roi, &scalerCropRegion);
+        }
+        if (reset && ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_AEC_ROI, roi)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_CONTROL_AF_REGIONS)) {
+        cam_area_t roi;
+        bool reset = true;
+        convertFromRegions(roi, request->settings, ANDROID_CONTROL_AF_REGIONS);
+
+        // Map coordinate system from active array to sensor output.
+        mCropRegionMapper.toSensor(roi.rect.left, roi.rect.top, roi.rect.width,
+                roi.rect.height);
+
+        if (scalerCropSet) {
+            reset = resetIfNeededROI(&roi, &scalerCropRegion);
+        }
+        if (reset && ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_AF_ROI, roi)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (m_bIs4KVideo) {
+        /* Override needed for Video template in case of 4K video */
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata,
+                CAM_INTF_PARM_CDS_MODE, m_CdsPreference)) {
+            rc = BAD_VALUE;
+        }
+    } else if ((mOpMode != CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE) &&
+            frame_settings.exists(QCAMERA3_CDS_MODE)) {
+        int32_t *fwk_cds = frame_settings.find(QCAMERA3_CDS_MODE).data.i32;
+        if ((CAM_CDS_MODE_MAX <= *fwk_cds) || (0 > *fwk_cds)) {
+            ALOGE("%s: Invalid CDS mode %d!", __func__, *fwk_cds);
+        } else {
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata,
+                    CAM_INTF_PARM_CDS_MODE, *fwk_cds)) {
+                rc = BAD_VALUE;
+            }
+        }
+    }
+
+    // TNR
+    if (frame_settings.exists(QCAMERA3_TEMPORAL_DENOISE_ENABLE) &&
+        frame_settings.exists(QCAMERA3_TEMPORAL_DENOISE_PROCESS_TYPE)) {
+        uint8_t b_TnrRequested = 0;
+        cam_denoise_param_t tnr;
+        tnr.denoise_enable = frame_settings.find(QCAMERA3_TEMPORAL_DENOISE_ENABLE).data.u8[0];
+        tnr.process_plates =
+            (cam_denoise_process_type_t)frame_settings.find(
+            QCAMERA3_TEMPORAL_DENOISE_PROCESS_TYPE).data.i32[0];
+        b_TnrRequested = tnr.denoise_enable;
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters, CAM_INTF_PARM_TEMPORAL_DENOISE, tnr)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_SENSOR_TEST_PATTERN_MODE)) {
+        int32_t fwk_testPatternMode =
+                frame_settings.find(ANDROID_SENSOR_TEST_PATTERN_MODE).data.i32[0];
+        int testPatternMode = lookupHalName(TEST_PATTERN_MAP,
+                METADATA_MAP_SIZE(TEST_PATTERN_MAP), fwk_testPatternMode);
+
+        if (NAME_NOT_FOUND != testPatternMode) {
+            cam_test_pattern_data_t testPatternData;
+            memset(&testPatternData, 0, sizeof(testPatternData));
+            testPatternData.mode = (cam_test_pattern_mode_t)testPatternMode;
+            if (testPatternMode == CAM_TEST_PATTERN_SOLID_COLOR &&
+                    frame_settings.exists(ANDROID_SENSOR_TEST_PATTERN_DATA)) {
+                int32_t *fwk_testPatternData =
+                        frame_settings.find(ANDROID_SENSOR_TEST_PATTERN_DATA).data.i32;
+                testPatternData.r = fwk_testPatternData[0];
+                testPatternData.b = fwk_testPatternData[3];
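+                /* The two remaining framework values carry the green channels;
+                 * the switch below assigns them to the sensor's Gr/Gb slots
+                 * according to the Bayer color filter arrangement. */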
+                switch (gCamCapability[mCameraId]->color_arrangement) {
+                    case CAM_FILTER_ARRANGEMENT_RGGB:
+                    case CAM_FILTER_ARRANGEMENT_GRBG:
+                        testPatternData.gr = fwk_testPatternData[1];
+                        testPatternData.gb = fwk_testPatternData[2];
+                        break;
+                    case CAM_FILTER_ARRANGEMENT_GBRG:
+                    case CAM_FILTER_ARRANGEMENT_BGGR:
+                        testPatternData.gr = fwk_testPatternData[2];
+                        testPatternData.gb = fwk_testPatternData[1];
+                        break;
+                    default:
+                        ALOGE("%s: color arrangement %d is not supported", __func__,
+                                gCamCapability[mCameraId]->color_arrangement);
+                        break;
+                }
+            }
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_TEST_PATTERN_DATA,
+                    testPatternData)) {
+                rc = BAD_VALUE;
+            }
+        } else {
+            ALOGE("%s: Invalid framework sensor test pattern mode %d", __func__,
+                    fwk_testPatternMode);
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_JPEG_GPS_COORDINATES)) {
+        size_t count = 0;
+        camera_metadata_entry_t gps_coords = frame_settings.find(ANDROID_JPEG_GPS_COORDINATES);
+        ADD_SET_PARAM_ARRAY_TO_BATCH(hal_metadata, CAM_INTF_META_JPEG_GPS_COORDINATES,
+                gps_coords.data.d, gps_coords.count, count);
+        if (gps_coords.count != count) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_JPEG_GPS_PROCESSING_METHOD)) {
+        char gps_methods[GPS_PROCESSING_METHOD_SIZE];
+        size_t count = 0;
+        const char *gps_methods_src = (const char *)
+                frame_settings.find(ANDROID_JPEG_GPS_PROCESSING_METHOD).data.u8;
+        memset(gps_methods, '\0', sizeof(gps_methods));
+        strlcpy(gps_methods, gps_methods_src, sizeof(gps_methods));
+        ADD_SET_PARAM_ARRAY_TO_BATCH(hal_metadata, CAM_INTF_META_JPEG_GPS_PROC_METHODS,
+                gps_methods, GPS_PROCESSING_METHOD_SIZE, count);
+        if (GPS_PROCESSING_METHOD_SIZE != count) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_JPEG_GPS_TIMESTAMP)) {
+        int64_t gps_timestamp = frame_settings.find(ANDROID_JPEG_GPS_TIMESTAMP).data.i64[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_JPEG_GPS_TIMESTAMP,
+                gps_timestamp)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_JPEG_ORIENTATION)) {
+        int32_t orientation = frame_settings.find(ANDROID_JPEG_ORIENTATION).data.i32[0];
+        cam_rotation_info_t rotation_info;
+        if (orientation == 0) {
+           rotation_info.rotation = ROTATE_0;
+        } else if (orientation == 90) {
+           rotation_info.rotation = ROTATE_90;
+        } else if (orientation == 180) {
+           rotation_info.rotation = ROTATE_180;
+        } else if (orientation == 270) {
+           rotation_info.rotation = ROTATE_270;
+        }
+        rotation_info.streamId = snapshotStreamId;
+        ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_JPEG_ORIENTATION, orientation);
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_ROTATION, rotation_info)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_JPEG_QUALITY)) {
+        uint32_t quality = (uint32_t) frame_settings.find(ANDROID_JPEG_QUALITY).data.u8[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_JPEG_QUALITY, quality)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_JPEG_THUMBNAIL_QUALITY)) {
+        uint32_t thumb_quality = (uint32_t)
+                frame_settings.find(ANDROID_JPEG_THUMBNAIL_QUALITY).data.u8[0];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_JPEG_THUMB_QUALITY,
+                thumb_quality)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(ANDROID_JPEG_THUMBNAIL_SIZE)) {
+        cam_dimension_t dim;
+        dim.width = frame_settings.find(ANDROID_JPEG_THUMBNAIL_SIZE).data.i32[0];
+        dim.height = frame_settings.find(ANDROID_JPEG_THUMBNAIL_SIZE).data.i32[1];
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_JPEG_THUMB_SIZE, dim)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    // Internal metadata
+    if (frame_settings.exists(QCAMERA3_PRIVATEDATA_REPROCESS)) {
+        size_t count = 0;
+        camera_metadata_entry_t privatedata = frame_settings.find(QCAMERA3_PRIVATEDATA_REPROCESS);
+        ADD_SET_PARAM_ARRAY_TO_BATCH(hal_metadata, CAM_INTF_META_PRIVATE_DATA,
+                privatedata.data.i32, privatedata.count, count);
+        if (privatedata.count != count) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    if (frame_settings.exists(QCAMERA3_USE_AV_TIMER)) {
+        uint8_t* use_av_timer =
+                frame_settings.find(QCAMERA3_USE_AV_TIMER).data.u8;
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_META_USE_AV_TIMER, *use_av_timer)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    // EV step
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata, CAM_INTF_PARM_EV_STEP,
+            gCamCapability[mCameraId]->exp_compensation_step)) {
+        rc = BAD_VALUE;
+    }
+
+    // CDS info
+    if (frame_settings.exists(QCAMERA3_CDS_INFO)) {
+        cam_cds_data_t *cdsData = (cam_cds_data_t *)
+                frame_settings.find(QCAMERA3_CDS_INFO).data.u8;
+
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata,
+                CAM_INTF_META_CDS_DATA, *cdsData)) {
+            rc = BAD_VALUE;
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : captureResultCb
+ *
+ * DESCRIPTION: Callback handler for all channels (streams, as well as metadata)
+ *
+ * PARAMETERS :
+ *   @metadata     : metadata super buffer from mm-camera-interface
+ *   @buffer       : actual gralloc buffer to be returned to framework. NULL if metadata.
+ *   @frame_number : frame number of the request the result belongs to
+ *   @isInputBuffer: true if the callback is for the input (reprocess) buffer
+ *   @userdata     : pointer to the QCamera3HardwareInterface instance
+ *
+ * RETURN     : NONE
+ *==========================================================================*/
+void QCamera3HardwareInterface::captureResultCb(mm_camera_super_buf_t *metadata,
+                camera3_stream_buffer_t *buffer,
+                uint32_t frame_number, bool isInputBuffer, void *userdata)
+{
+    QCamera3HardwareInterface *hw = (QCamera3HardwareInterface *)userdata;
+    if (hw == NULL) {
+        ALOGE("%s: Invalid hw %p", __func__, hw);
+        return;
+    }
+
+    hw->captureResultCb(metadata, buffer, frame_number, isInputBuffer);
+    return;
+}
+
+
+/*===========================================================================
+ * FUNCTION   : initialize
+ *
+ * DESCRIPTION: Pass framework callback pointers to HAL
+ *
+ * PARAMETERS :
+ *   @device       : camera3 device handle
+ *   @callback_ops : framework callback function table
+ *
+ * RETURN     : Success : 0
+ *              Failure: -ENODEV
+ *==========================================================================*/
+
+int QCamera3HardwareInterface::initialize(const struct camera3_device *device,
+                                  const camera3_callback_ops_t *callback_ops)
+{
+    CDBG("%s: E", __func__);
+    QCamera3HardwareInterface *hw =
+        reinterpret_cast<QCamera3HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("%s: NULL camera device", __func__);
+        return -ENODEV;
+    }
+
+    int rc = hw->initialize(callback_ops);
+    CDBG("%s: X", __func__);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : configure_streams
+ *
+ * DESCRIPTION: Configure the output streams requested by the framework
+ *
+ * PARAMETERS :
+ *   @device      : camera3 device handle
+ *   @stream_list : stream configuration requested by the framework
+ *
+ * RETURN     : Success: 0
+ *              Failure: -EINVAL (if stream configuration is invalid)
+ *                       -ENODEV (fatal error)
+ *==========================================================================*/
+
+int QCamera3HardwareInterface::configure_streams(
+        const struct camera3_device *device,
+        camera3_stream_configuration_t *stream_list)
+{
+    CDBG("%s: E", __func__);
+    QCamera3HardwareInterface *hw =
+        reinterpret_cast<QCamera3HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("%s: NULL camera device", __func__);
+        return -ENODEV;
+    }
+    int rc = hw->configureStreams(stream_list);
+    CDBG("%s: X", __func__);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : construct_default_request_settings
+ *
+ * DESCRIPTION: Configure a settings buffer to meet the required use case
+ *
+ * PARAMETERS :
+ *   @device : camera3 device handle
+ *   @type   : request template type (CAMERA3_TEMPLATE_*)
+ *
+ * RETURN     : Success: Return valid metadata
+ *              Failure: Return NULL
+ *==========================================================================*/
+const camera_metadata_t* QCamera3HardwareInterface::
+    construct_default_request_settings(const struct camera3_device *device,
+                                        int type)
+{
+
+    CDBG("%s: E", __func__);
+    camera_metadata_t* fwk_metadata = NULL;
+    QCamera3HardwareInterface *hw =
+        reinterpret_cast<QCamera3HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("%s: NULL camera device", __func__);
+        return NULL;
+    }
+
+    fwk_metadata = hw->translateCapabilityToMetadata(type);
+
+    CDBG("%s: X", __func__);
+    return fwk_metadata;
+}
+
+/*===========================================================================
+ * FUNCTION   : process_capture_request
+ *
+ * DESCRIPTION: Submit a capture request from the framework to the HAL
+ *
+ * PARAMETERS :
+ *   @device  : camera3 device handle
+ *   @request : capture request to be processed
+ *
+ * RETURN     : Success: 0
+ *              Failure: negative error code (-EINVAL if the device is invalid)
+ *==========================================================================*/
+int QCamera3HardwareInterface::process_capture_request(
+                    const struct camera3_device *device,
+                    camera3_capture_request_t *request)
+{
+    CDBG("%s: E", __func__);
+    QCamera3HardwareInterface *hw =
+        reinterpret_cast<QCamera3HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("%s: NULL camera device", __func__);
+        return -EINVAL;
+    }
+
+    int rc = hw->processCaptureRequest(request);
+    CDBG("%s: X", __func__);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : dump
+ *
+ * DESCRIPTION: Dump HAL state to the given file descriptor (triggered by
+ *              "adb shell dumpsys media.camera")
+ *
+ * PARAMETERS :
+ *   @device : camera3 device handle
+ *   @fd     : file descriptor to dump into
+ *
+ * RETURN     : None
+ *==========================================================================*/
+
+void QCamera3HardwareInterface::dump(
+                const struct camera3_device *device, int fd)
+{
+    /* Log level property is read when "adb shell dumpsys media.camera" is
+       called so that the log level can be controlled without restarting
+       the media server */
+    getLogLevel();
+
+    CDBG("%s: E", __func__);
+    QCamera3HardwareInterface *hw =
+        reinterpret_cast<QCamera3HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("%s: NULL camera device", __func__);
+        return;
+    }
+
+    hw->dump(fd);
+    CDBG("%s: X", __func__);
+    return;
+}
+
+/*===========================================================================
+ * FUNCTION   : flush
+ *
+ * DESCRIPTION: Flush all in-flight requests and return their buffers and results
+ *
+ * PARAMETERS :
+ *   @device : camera3 device handle
+ *
+ * RETURN     : Success: 0
+ *              Failure: -EINVAL if the device is invalid
+ *==========================================================================*/
+
+int QCamera3HardwareInterface::flush(
+                const struct camera3_device *device)
+{
+    int rc;
+    CDBG("%s: E", __func__);
+    QCamera3HardwareInterface *hw =
+        reinterpret_cast<QCamera3HardwareInterface *>(device->priv);
+    if (!hw) {
+        ALOGE("%s: NULL camera device", __func__);
+        return -EINVAL;
+    }
+
+    rc = hw->flush();
+    CDBG("%s: X", __func__);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : close_camera_device
+ *
+ * DESCRIPTION: Close the camera device and free the HAL instance
+ *
+ * PARAMETERS :
+ *   @device : hw_device_t handle of the camera to be closed
+ *
+ * RETURN     : NO_ERROR on success
+ *              BAD_VALUE if the device handle is invalid
+ *==========================================================================*/
+int QCamera3HardwareInterface::close_camera_device(struct hw_device_t* device)
+{
+    CDBG("%s: E", __func__);
+    int ret = NO_ERROR;
+    QCamera3HardwareInterface *hw =
+        reinterpret_cast<QCamera3HardwareInterface *>(
+            reinterpret_cast<camera3_device_t *>(device)->priv);
+    if (!hw) {
+        ALOGE("NULL camera device");
+        return BAD_VALUE;
+    }
+    delete hw;
+
+    CDBG("%s: X", __func__);
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : getWaveletDenoiseProcessPlate
+ *
+ * DESCRIPTION: query wavelet denoise process plate
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : WNR process plate value
+ *==========================================================================*/
+cam_denoise_process_type_t QCamera3HardwareInterface::getWaveletDenoiseProcessPlate()
+{
+    char prop[PROPERTY_VALUE_MAX];
+    memset(prop, 0, sizeof(prop));
+    property_get("persist.denoise.process.plates", prop, "0");
+    int processPlate = atoi(prop);
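+    /* The plate selection can be overridden at runtime, e.g. via
+     * "adb shell setprop persist.denoise.process.plates 1" for CbCr-only
+     * processing; unrecognized values fall back to STREAMLINE_YCBCR below. */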
+    switch(processPlate) {
+    case 0:
+        return CAM_WAVELET_DENOISE_YCBCR_PLANE;
+    case 1:
+        return CAM_WAVELET_DENOISE_CBCR_ONLY;
+    case 2:
+        return CAM_WAVELET_DENOISE_STREAMLINE_YCBCR;
+    case 3:
+        return CAM_WAVELET_DENOISE_STREAMLINED_CBCR;
+    default:
+        return CAM_WAVELET_DENOISE_STREAMLINE_YCBCR;
+    }
+}
+
+
+/*===========================================================================
+ * FUNCTION   : getTemporalDenoiseProcessPlate
+ *
+ * DESCRIPTION: query temporal denoise process plate
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : TNR process plate value
+ *==========================================================================*/
+cam_denoise_process_type_t QCamera3HardwareInterface::getTemporalDenoiseProcessPlate()
+{
+    char prop[PROPERTY_VALUE_MAX];
+    memset(prop, 0, sizeof(prop));
+    property_get("persist.tnr.process.plates", prop, "0");
+    int processPlate = atoi(prop);
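+    /* Same plate enumeration as the wavelet case above, but controlled by the
+     * separate persist.tnr.process.plates property. */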
+    switch(processPlate) {
+    case 0:
+        return CAM_WAVELET_DENOISE_YCBCR_PLANE;
+    case 1:
+        return CAM_WAVELET_DENOISE_CBCR_ONLY;
+    case 2:
+        return CAM_WAVELET_DENOISE_STREAMLINE_YCBCR;
+    case 3:
+        return CAM_WAVELET_DENOISE_STREAMLINED_CBCR;
+    default:
+        return CAM_WAVELET_DENOISE_STREAMLINE_YCBCR;
+    }
+}
+
+
+/*===========================================================================
+ * FUNCTION   : extractSceneMode
+ *
+ * DESCRIPTION: Extract scene mode from frameworks set metadata
+ *
+ * PARAMETERS :
+ *      @frame_settings: CameraMetadata reference
+ *      @metaMode: ANDROID_CONTROL_MODE value
+ *      @hal_metadata: hal metadata structure
+ *
+ * RETURN     : NO_ERROR on success
+ *              BAD_VALUE on failure
+ *==========================================================================*/
+int32_t QCamera3HardwareInterface::extractSceneMode(
+        const CameraMetadata &frame_settings, uint8_t metaMode,
+        metadata_buffer_t *hal_metadata)
+{
+    int32_t rc = NO_ERROR;
+
+    if (metaMode == ANDROID_CONTROL_MODE_USE_SCENE_MODE) {
+        camera_metadata_ro_entry entry =
+                frame_settings.find(ANDROID_CONTROL_SCENE_MODE);
+        if (0 == entry.count)
+            return rc;
+
+        uint8_t fwk_sceneMode = entry.data.u8[0];
+
+        int val = lookupHalName(SCENE_MODES_MAP,
+                sizeof(SCENE_MODES_MAP)/sizeof(SCENE_MODES_MAP[0]),
+                fwk_sceneMode);
+        if (NAME_NOT_FOUND != val) {
+            uint8_t sceneMode = (uint8_t)val;
+            CDBG("%s: sceneMode: %d", __func__, sceneMode);
+            if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata,
+                    CAM_INTF_PARM_BESTSHOT_MODE, sceneMode)) {
+                rc = BAD_VALUE;
+            }
+        }
+    } else if ((ANDROID_CONTROL_MODE_OFF == metaMode) ||
+            (ANDROID_CONTROL_MODE_AUTO == metaMode)) {
+        uint8_t sceneMode = CAM_SCENE_MODE_OFF;
+        CDBG("%s: sceneMode: %d", __func__, sceneMode);
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(hal_metadata,
+                CAM_INTF_PARM_BESTSHOT_MODE, sceneMode)) {
+            rc = BAD_VALUE;
+        }
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : needRotationReprocess
+ *
+ * DESCRIPTION: check whether rotation needs to be done by the pp reprocess block
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : true: needed
+ *              false: no need
+ *==========================================================================*/
+bool QCamera3HardwareInterface::needRotationReprocess()
+{
+    if ((gCamCapability[mCameraId]->qcom_supported_feature_mask & CAM_QCOM_FEATURE_ROTATION) > 0) {
+        // current rotation is not zero, and pp has the capability to process rotation
+        CDBG_HIGH("%s: need do reprocess for rotation", __func__);
+        return true;
+    }
+
+    return false;
+}
+
+/*===========================================================================
+ * FUNCTION   : needReprocess
+ *
+ * DESCRIPTION: check whether reprocess is needed
+ *
+ * PARAMETERS :
+ *   @postprocess_mask : post-processing feature mask of the stream
+ *
+ * RETURN     : true: needed
+ *              false: no need
+ *==========================================================================*/
+bool QCamera3HardwareInterface::needReprocess(uint32_t postprocess_mask)
+{
+    if (gCamCapability[mCameraId]->min_required_pp_mask > 0) {
+        // TODO: add for ZSL HDR later
+        // pp module has min requirement for zsl reprocess, or WNR in ZSL mode
+        if(postprocess_mask == CAM_QCOM_FEATURE_NONE){
+            CDBG_HIGH("%s: need do reprocess for ZSL WNR or min PP reprocess", __func__);
+            return true;
+        } else {
+            CDBG_HIGH("%s: already post processed frame", __func__);
+            return false;
+        }
+    }
+    return needRotationReprocess();
+}
+
+/*===========================================================================
+ * FUNCTION   : needJpegRotation
+ *
+ * DESCRIPTION: check whether rotation has to be done by the JPEG encoder
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : true: needed
+ *              false: no need
+ *==========================================================================*/
+bool QCamera3HardwareInterface::needJpegRotation()
+{
+   /* If the pp does not have the ability to do rotation, enable jpeg rotation */
+    if (!(gCamCapability[mCameraId]->qcom_supported_feature_mask & CAM_QCOM_FEATURE_ROTATION)) {
+       CDBG("%s: Need Jpeg to do the rotation", __func__);
+       return true;
+    }
+    return false;
+}
+
+/*===========================================================================
+ * FUNCTION   : addOfflineReprocChannel
+ *
+ * DESCRIPTION: add a reprocess channel that will do reprocess on frames
+ *              coming from input channel
+ *
+ * PARAMETERS :
+ *   @config  : reprocess configuration
+ *   @inputChHandle : pointer to the input (source) channel
+ *
+ *
+ * RETURN     : Ptr to the newly created channel obj. NULL if failed.
+ *==========================================================================*/
+QCamera3ReprocessChannel *QCamera3HardwareInterface::addOfflineReprocChannel(
+        const reprocess_config_t &config, QCamera3ProcessingChannel *inputChHandle)
+{
+    int32_t rc = NO_ERROR;
+    QCamera3ReprocessChannel *pChannel = NULL;
+
+    pChannel = new QCamera3ReprocessChannel(mCameraHandle->camera_handle,
+            mChannelHandle, mCameraHandle->ops, captureResultCb, config.padding,
+            CAM_QCOM_FEATURE_NONE, this, inputChHandle);
+    if (NULL == pChannel) {
+        ALOGE("%s: no mem for reprocess channel", __func__);
+        return NULL;
+    }
+
+    rc = pChannel->initialize(IS_TYPE_NONE);
+    if (rc != NO_ERROR) {
+        ALOGE("%s: init reprocess channel failed, ret = %d", __func__, rc);
+        delete pChannel;
+        return NULL;
+    }
+
+    // pp feature config
+    cam_pp_feature_config_t pp_config;
+    memset(&pp_config, 0, sizeof(cam_pp_feature_config_t));
+
+    pp_config.feature_mask |= CAM_QCOM_FEATURE_PP_SUPERSET_HAL3;
+
+    rc = pChannel->addReprocStreamsFromSource(pp_config,
+            config,
+            IS_TYPE_NONE,
+            mMetadataChannel);
+
+    if (rc != NO_ERROR) {
+        delete pChannel;
+        return NULL;
+    }
+    return pChannel;
+}
+
+/*===========================================================================
+ * FUNCTION   : getMobicatMask
+ *
+ * DESCRIPTION: returns mobicat mask
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : mobicat mask
+ *
+ *==========================================================================*/
+uint8_t QCamera3HardwareInterface::getMobicatMask()
+{
+    return m_MobicatMask;
+}
+
+/*===========================================================================
+ * FUNCTION   : setMobicat
+ *
+ * DESCRIPTION: set Mobicat on/off.
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3HardwareInterface::setMobicat()
+{
+    char value [PROPERTY_VALUE_MAX];
+    property_get("persist.camera.mobicat", value, "0");
+    int32_t ret = NO_ERROR;
+    uint8_t enableMobi = (uint8_t)atoi(value);
+
+    if (enableMobi) {
+        tune_cmd_t tune_cmd;
+        tune_cmd.type = SET_RELOAD_CHROMATIX;
+        tune_cmd.module = MODULE_ALL;
+        tune_cmd.value = TRUE;
+        ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters,
+                CAM_INTF_PARM_SET_VFE_COMMAND,
+                tune_cmd);
+
+        ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters,
+                CAM_INTF_PARM_SET_PP_COMMAND,
+                tune_cmd);
+    }
+    m_MobicatMask = enableMobi;
+
+    return ret;
+}
+
+/*===========================================================================
+* FUNCTION   : getLogLevel
+*
+* DESCRIPTION: Reads the log level property into a variable
+*
+* PARAMETERS :
+*   None
+*
+* RETURN     :
+*   None
+*==========================================================================*/
+void QCamera3HardwareInterface::getLogLevel()
+{
+    char prop[PROPERTY_VALUE_MAX];
+    uint32_t globalLogLevel = 0;
+
+    property_get("persist.camera.hal.debug", prop, "0");
+    int val = atoi(prop);
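+    /* Per the CDBG macros in QCamera3HWI.h, a level of 1 enables CDBG_HIGH
+     * logs and a level of 2 or higher also enables CDBG logs, e.g.
+     * "adb shell setprop persist.camera.hal.debug 2". */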
+    if (0 <= val) {
+        gCamHal3LogLevel = (uint32_t)val;
+    }
+    property_get("persist.camera.global.debug", prop, "0");
+    val = atoi(prop);
+    if (0 <= val) {
+        globalLogLevel = (uint32_t)val;
+    }
+
+    /* Highest log level among hal.logs and global.logs is selected */
+    if (gCamHal3LogLevel < globalLogLevel)
+        gCamHal3LogLevel = globalLogLevel;
+
+    return;
+}
+
+/*===========================================================================
+ * FUNCTION   : validateStreamRotations
+ *
+ * DESCRIPTION: Check if the rotations requested are supported
+ *
+ * PARAMETERS :
+ *   @stream_list : streams to be configured
+ *
+ * RETURN     : NO_ERROR on success
+ *              -EINVAL on failure
+ *
+ *==========================================================================*/
+int QCamera3HardwareInterface::validateStreamRotations(
+        camera3_stream_configuration_t *streamList)
+{
+    int rc = NO_ERROR;
+
+    /*
+    * Loop through all streams requested in configuration
+    * Check if unsupported rotations have been requested on any of them
+    */
+    for (size_t j = 0; j < streamList->num_streams; j++){
+        camera3_stream_t *newStream = streamList->streams[j];
+
+        bool isRotated = (newStream->rotation != CAMERA3_STREAM_ROTATION_0);
+        bool isImplDef = (newStream->format ==
+                HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED);
+        bool isZsl = (newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL &&
+                isImplDef);
+
+        if (isRotated && (!isImplDef || isZsl)) {
+            ALOGE("%s: Error: Unsupported rotation of %d requested for stream"
+                    "type:%d and stream format:%d", __func__,
+                    newStream->rotation, newStream->stream_type,
+                    newStream->format);
+            rc = -EINVAL;
+            break;
+        }
+    }
+    return rc;
+}
+
+/*===========================================================================
+* FUNCTION   : getFlashInfo
+*
+* DESCRIPTION: Retrieve information about whether the device has a flash.
+*
+* PARAMETERS :
+*   @cameraId  : Camera id to query
+*   @hasFlash  : Boolean indicating whether there is a flash device
+*                associated with given camera
+*   @flashNode : If a flash device exists, this will be its device node.
+*
+* RETURN     :
+*   None
+*==========================================================================*/
+void QCamera3HardwareInterface::getFlashInfo(const int cameraId,
+        bool& hasFlash,
+        char (&flashNode)[QCAMERA_MAX_FILEPATH_LENGTH])
+{
+    cam_capability_t* camCapability = gCamCapability[cameraId];
+    if (NULL == camCapability) {
+        hasFlash = false;
+        flashNode[0] = '\0';
+    } else {
+        hasFlash = camCapability->flash_available;
+        strlcpy(flashNode,
+                (char*)camCapability->flash_dev_name,
+                QCAMERA_MAX_FILEPATH_LENGTH);
+    }
+}
+
+/*===========================================================================
+* FUNCTION   : getEepromVersionInfo
+*
+* DESCRIPTION: Retrieve version info of the sensor EEPROM data
+*
+* PARAMETERS : None
+*
+* RETURN     : string describing EEPROM version
+*              "\0" if no such info available
+*==========================================================================*/
+const char *QCamera3HardwareInterface::getEepromVersionInfo()
+{
+    return (const char *)&gCamCapability[mCameraId]->eeprom_version_info[0];
+}
+
+/*===========================================================================
+* FUNCTION   : getLdafCalib
+*
+* DESCRIPTION: Retrieve Laser AF calibration data
+*
+* PARAMETERS : None
+*
+* RETURN     : Two uint32_t describing laser AF calibration data
+*              NULL if none is available.
+*==========================================================================*/
+const uint32_t *QCamera3HardwareInterface::getLdafCalib()
+{
+    if (mLdafCalibExist) {
+        return &mLdafCalib[0];
+    } else {
+        return NULL;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : dynamicUpdateMetaStreamInfo
+ *
+ * DESCRIPTION: This function:
+ *             (1) stops all the channels
+ *             (2) returns error on pending requests and buffers
+ *             (3) sends metastream_info in setparams
+ *             (4) starts all channels
+ *             This is useful when sensor has to be restarted to apply any
+ *             settings such as frame rate from a different sensor mode
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : NO_ERROR on success
+ *              Error codes on failure
+ *
+ *==========================================================================*/
+int32_t QCamera3HardwareInterface::dynamicUpdateMetaStreamInfo()
+{
+    ATRACE_CALL();
+    int rc = NO_ERROR;
+
+    CDBG("%s: E", __func__);
+
+    rc = stopAllChannels();
+    if (rc < 0) {
+        ALOGE("%s: stopAllChannels failed", __func__);
+        return rc;
+    }
+
+    rc = notifyErrorForPendingRequests();
+    if (rc < 0) {
+        ALOGE("%s: notifyErrorForPendingRequests failed", __func__);
+        return rc;
+    }
+
+    /* Send meta stream info once again so that ISP can start */
+    ADD_SET_PARAM_ENTRY_TO_BATCH(mParameters,
+            CAM_INTF_META_STREAM_INFO, mStreamConfigInfo);
+    CDBG("%s: set_parms META_STREAM_INFO with new settings ", __func__ );
+    rc = mCameraHandle->ops->set_parms(mCameraHandle->camera_handle,
+            mParameters);
+    if (rc < 0) {
+        ALOGE("%s: set Metastreaminfo failed. Sensor mode does not change",
+                __func__);
+    }
+
+    rc = startAllChannels();
+    if (rc < 0) {
+        ALOGE("%s: startAllChannels failed", __func__);
+        return rc;
+    }
+
+    CDBG("%s:%d X", __func__, __LINE__);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : stopAllChannels
+ *
+ * DESCRIPTION: This function stops (equivalent to stream-off) all channels
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : NO_ERROR on success
+ *              Error codes on failure
+ *
+ *==========================================================================*/
+int32_t QCamera3HardwareInterface::stopAllChannels()
+{
+    int32_t rc = NO_ERROR;
+
+    // Stop the Streams/Channels
+    for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
+        it != mStreamInfo.end(); it++) {
+        QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
+        channel->stop();
+        (*it)->status = INVALID;
+    }
+
+    if (mSupportChannel) {
+        mSupportChannel->stop();
+    }
+    if (mAnalysisChannel) {
+        mAnalysisChannel->stop();
+    }
+    if (mRawDumpChannel) {
+        mRawDumpChannel->stop();
+    }
+    if (mMetadataChannel) {
+        /* A metadata stream exists whenever mStreamInfo is non-empty */
+        mMetadataChannel->stop();
+    }
+
+    CDBG("%s:%d All channels stopped", __func__, __LINE__);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : startAllChannels
+ *
+ * DESCRIPTION: This function starts (equivalent to stream-on) all channels
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : NO_ERROR on success
+ *              Error codes on failure
+ *
+ *==========================================================================*/
+int32_t QCamera3HardwareInterface::startAllChannels()
+{
+    int32_t rc = NO_ERROR;
+
+    CDBG("%s: Start all channels ", __func__);
+    // Start the Streams/Channels
+    if (mMetadataChannel) {
+        /* A metadata stream exists whenever mStreamInfo is non-empty */
+        rc = mMetadataChannel->start();
+        if (rc < 0) {
+            ALOGE("%s: META channel start failed", __func__);
+            return rc;
+        }
+    }
+    for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
+        it != mStreamInfo.end(); it++) {
+        QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
+        rc = channel->start();
+        if (rc < 0) {
+            ALOGE("%s: channel start failed", __func__);
+            return rc;
+        }
+    }
+    if (mAnalysisChannel) {
+        mAnalysisChannel->start();
+    }
+    if (mSupportChannel) {
+        rc = mSupportChannel->start();
+        if (rc < 0) {
+            ALOGE("%s: Support channel start failed", __func__);
+            return rc;
+        }
+    }
+    if (mRawDumpChannel) {
+        rc = mRawDumpChannel->start();
+        if (rc < 0) {
+            ALOGE("%s: RAW dump channel start failed", __func__);
+            return rc;
+        }
+    }
+
+    CDBG("%s:%d All channels started", __func__, __LINE__);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : notifyErrorForPendingRequests
+ *
+ * DESCRIPTION: This function sends error for all the pending requests/buffers
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : Error codes
+ *              NO_ERROR on success
+ *
+ *==========================================================================*/
+int32_t QCamera3HardwareInterface::notifyErrorForPendingRequests()
+{
+    int32_t rc = NO_ERROR;
+    unsigned int frameNum = 0;
+    camera3_capture_result_t result;
+    camera3_stream_buffer_t *pStream_Buf = NULL;
+    FlushMap flushMap;
+
+    memset(&result, 0, sizeof(camera3_capture_result_t));
+
+    if (mPendingRequestsList.size() > 0) {
+        pendingRequestIterator i = mPendingRequestsList.begin();
+        frameNum = i->frame_number;
+    } else {
+        /* There might still be pending buffers even though there are
+         no pending requests. Setting the frameNum to MAX so that
+         all the buffers with smaller frame numbers are returned */
+        frameNum = UINT_MAX;
+    }
+
+    CDBG_HIGH("%s: Oldest frame num on  mPendingRequestsList = %d",
+      __func__, frameNum);
+
+    // Go through the pending buffers and group them depending
+    // on frame number
+    for (List<PendingBufferInfo>::iterator k =
+            mPendingBuffersMap.mPendingBufferList.begin();
+            k != mPendingBuffersMap.mPendingBufferList.end();) {
+
+        if (k->frame_number < frameNum) {
+            ssize_t idx = flushMap.indexOfKey(k->frame_number);
+            if (idx == NAME_NOT_FOUND) {
+                Vector<PendingBufferInfo> pending;
+                pending.add(*k);
+                flushMap.add(k->frame_number, pending);
+            } else {
+                Vector<PendingBufferInfo> &pending =
+                        flushMap.editValueFor(k->frame_number);
+                pending.add(*k);
+            }
+
+            mPendingBuffersMap.num_buffers--;
+            k = mPendingBuffersMap.mPendingBufferList.erase(k);
+        } else {
+            k++;
+        }
+    }
+
+    for (size_t iFlush = 0; iFlush < flushMap.size(); iFlush++) {
+        uint32_t frame_number = flushMap.keyAt(iFlush);
+        const Vector<PendingBufferInfo> &pending = flushMap.valueAt(iFlush);
+
+        // Send Error notify to frameworks for each buffer for which
+        // metadata buffer is already sent
+        CDBG_HIGH("%s: Sending ERROR BUFFER for frame %d number of buffer %d",
+          __func__, frame_number, pending.size());
+
+        pStream_Buf = new camera3_stream_buffer_t[pending.size()];
+        if (NULL == pStream_Buf) {
+            ALOGE("%s: No memory for pending buffers array", __func__);
+            return NO_MEMORY;
+        }
+        memset(pStream_Buf, 0, sizeof(camera3_stream_buffer_t)*pending.size());
+
+        for (size_t j = 0; j < pending.size(); j++) {
+            const PendingBufferInfo &info = pending.itemAt(j);
+            camera3_notify_msg_t notify_msg;
+            memset(&notify_msg, 0, sizeof(camera3_notify_msg_t));
+            notify_msg.type = CAMERA3_MSG_ERROR;
+            notify_msg.message.error.error_code = CAMERA3_MSG_ERROR_BUFFER;
+            notify_msg.message.error.error_stream = info.stream;
+            notify_msg.message.error.frame_number = frame_number;
+            pStream_Buf[j].acquire_fence = -1;
+            pStream_Buf[j].release_fence = -1;
+            pStream_Buf[j].buffer = info.buffer;
+            pStream_Buf[j].status = CAMERA3_BUFFER_STATUS_ERROR;
+            pStream_Buf[j].stream = info.stream;
+            mCallbackOps->notify(mCallbackOps, &notify_msg);
+            CDBG_HIGH("%s: notify frame_number = %d stream %p", __func__,
+                    frame_number, info.stream);
+        }
+
+        result.result = NULL;
+        result.frame_number = frame_number;
+        result.num_output_buffers = (uint32_t)pending.size();
+        result.output_buffers = pStream_Buf;
+        mCallbackOps->process_capture_result(mCallbackOps, &result);
+
+        delete [] pStream_Buf;
+    }
+
+    CDBG_HIGH("%s:Sending ERROR REQUEST for all pending requests", __func__);
+
+    flushMap.clear();
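+    // Regroup whatever buffers are still pending (frame numbers at or above the
+    // oldest pending request) so that each frame can be returned below together
+    // with an ERROR_REQUEST notification.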
+    for (List<PendingBufferInfo>::iterator k =
+            mPendingBuffersMap.mPendingBufferList.begin();
+            k != mPendingBuffersMap.mPendingBufferList.end();) {
+        ssize_t idx = flushMap.indexOfKey(k->frame_number);
+        if (idx == NAME_NOT_FOUND) {
+            Vector<PendingBufferInfo> pending;
+            pending.add(*k);
+            flushMap.add(k->frame_number, pending);
+        } else {
+            Vector<PendingBufferInfo> &pending =
+                    flushMap.editValueFor(k->frame_number);
+            pending.add(*k);
+        }
+
+        mPendingBuffersMap.num_buffers--;
+        k = mPendingBuffersMap.mPendingBufferList.erase(k);
+    }
+
+    pendingRequestIterator i = mPendingRequestsList.begin(); //make sure i is at the beginning
+
+    // Go through the pending requests info and send error request to framework
+    for (size_t iFlush = 0; iFlush < flushMap.size(); iFlush++) {
+        uint32_t frame_number = flushMap.keyAt(iFlush);
+        const Vector<PendingBufferInfo> &pending = flushMap.valueAt(iFlush);
+        CDBG_HIGH("%s:Sending ERROR REQUEST for frame %d",
+              __func__, frame_number);
+
+        // Send shutter notify to frameworks
+        camera3_notify_msg_t notify_msg;
+        memset(&notify_msg, 0, sizeof(camera3_notify_msg_t));
+        notify_msg.type = CAMERA3_MSG_ERROR;
+        notify_msg.message.error.error_code = CAMERA3_MSG_ERROR_REQUEST;
+        notify_msg.message.error.error_stream = NULL;
+        notify_msg.message.error.frame_number = frame_number;
+        mCallbackOps->notify(mCallbackOps, &notify_msg);
+
+        pStream_Buf = new camera3_stream_buffer_t[pending.size()];
+        if (NULL == pStream_Buf) {
+            ALOGE("%s: No memory for pending buffers array", __func__);
+            return NO_MEMORY;
+        }
+        memset(pStream_Buf, 0, sizeof(camera3_stream_buffer_t)*pending.size());
+
+        for (size_t j = 0; j < pending.size(); j++) {
+            const PendingBufferInfo &info = pending.itemAt(j);
+            pStream_Buf[j].acquire_fence = -1;
+            pStream_Buf[j].release_fence = -1;
+            pStream_Buf[j].buffer = info.buffer;
+            pStream_Buf[j].status = CAMERA3_BUFFER_STATUS_ERROR;
+            pStream_Buf[j].stream = info.stream;
+        }
+
+        result.input_buffer = i->input_buffer;
+        result.num_output_buffers = (uint32_t)pending.size();
+        result.output_buffers = pStream_Buf;
+        result.result = NULL;
+        result.frame_number = frame_number;
+        mCallbackOps->process_capture_result(mCallbackOps, &result);
+        delete [] pStream_Buf;
+        i = erasePendingRequest(i);
+    }
+
+    /* Reset pending frame Drop list and requests list */
+    mPendingFrameDropList.clear();
+
+    flushMap.clear();
+    mPendingBuffersMap.num_buffers = 0;
+    mPendingBuffersMap.mPendingBufferList.clear();
+    mPendingReprocessResultList.clear();
+    CDBG_HIGH("%s: Cleared all the pending buffers ", __func__);
+
+    return rc;
+}
+
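+/*===========================================================================
+ * FUNCTION   : isOnEncoder
+ *
+ * DESCRIPTION: check whether a stream dimension exceeds the maximum supported
+ *              viewfinder size and therefore belongs on the encoder path
+ *
+ * PARAMETERS :
+ *   @max_viewfinder_size : maximum supported viewfinder dimension
+ *   @width               : stream width
+ *   @height              : stream height
+ *
+ * RETURN     : true if either dimension exceeds the viewfinder limit
+ *              false otherwise
+ *==========================================================================*/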
+bool QCamera3HardwareInterface::isOnEncoder(
+        const cam_dimension_t max_viewfinder_size,
+        uint32_t width, uint32_t height)
+{
+    return (width > (uint32_t)max_viewfinder_size.width ||
+            height > (uint32_t)max_viewfinder_size.height);
+}
+
+/*===========================================================================
+ * FUNCTION   : setBundleInfo
+ *
+ * DESCRIPTION: Set bundle info for all streams that are bundled.
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : NO_ERROR on success
+ *              Error codes on failure
+ *==========================================================================*/
+int32_t QCamera3HardwareInterface::setBundleInfo()
+{
+    int32_t rc = NO_ERROR;
+
+    if (mChannelHandle) {
+        cam_bundle_config_t bundleInfo;
+        memset(&bundleInfo, 0, sizeof(bundleInfo));
+        rc = mCameraHandle->ops->get_bundle_info(
+                mCameraHandle->camera_handle, mChannelHandle, &bundleInfo);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: get_bundle_info failed", __func__);
+            return rc;
+        }
+        if (mAnalysisChannel) {
+            mAnalysisChannel->setBundleInfo(bundleInfo);
+        }
+        if (mSupportChannel) {
+            mSupportChannel->setBundleInfo(bundleInfo);
+        }
+        for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
+                it != mStreamInfo.end(); it++) {
+            QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
+            channel->setBundleInfo(bundleInfo);
+        }
+        if (mRawDumpChannel) {
+            mRawDumpChannel->setBundleInfo(bundleInfo);
+        }
+    }
+
+    return rc;
+}
+
+}; //end namespace qcamera
diff --git a/camera/QCamera2/HAL3/QCamera3HWI.h b/camera/QCamera2/HAL3/QCamera3HWI.h
new file mode 100644
index 0000000..75055b0
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3HWI.h
@@ -0,0 +1,465 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#ifndef __QCAMERA3HARDWAREINTERFACE_H__
+#define __QCAMERA3HARDWAREINTERFACE_H__
+
+#include <pthread.h>
+#include <utils/List.h>
+#include <utils/KeyedVector.h>
+#include <hardware/camera3.h>
+#include <camera/CameraMetadata.h>
+#include "QCamera3HALHeader.h"
+#include "QCamera3Channel.h"
+#include "QCamera3CropRegionMapper.h"
+#include "QCameraPerf.h"
+
+extern "C" {
+#include <mm_camera_interface.h>
+#include <mm_jpeg_interface.h>
+}
+#ifdef CDBG
+#undef CDBG
+#endif //#ifdef CDBG
+#define CDBG(fmt, args...) ALOGD_IF(gCamHal3LogLevel >= 2, fmt, ##args)
+
+#ifdef CDBG_HIGH
+#undef CDBG_HIGH
+#endif //#ifdef CDBG_HIGH
+#define CDBG_HIGH(fmt, args...) ALOGD_IF(gCamHal3LogLevel >= 1, fmt, ##args)
+
+#ifdef CDBG_FATAL_IF
+#undef CDBG_FATAL_IF
+#endif //#ifdef CDBG_FATAL_IF
+#define CDBG_FATAL_IF(cond, ...) LOG_ALWAYS_FATAL_IF(cond, ## __VA_ARGS__)
+
+using namespace android;
+
+namespace qcamera {
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+/* Time related macros */
+typedef int64_t nsecs_t;
+#define NSEC_PER_SEC 1000000000LLU
+#define NSEC_PER_USEC 1000LLU
+#define NSEC_PER_33MSEC 33000000LLU
+
+typedef enum {
+    SET_ENABLE,
+    SET_CONTROLENABLE,
+    SET_RELOAD_CHROMATIX,
+    SET_STATUS,
+} optype_t;
+
+#define MODULE_ALL 0
+
+extern volatile uint32_t gCamHal3LogLevel;
+
+class QCamera3MetadataChannel;
+class QCamera3PicChannel;
+class QCamera3HeapMemory;
+class QCamera3Exif;
+
+typedef struct {
+    camera3_stream_t *stream;
+    camera3_stream_buffer_set_t buffer_set;
+    stream_status_t status;
+    int registered;
+    QCamera3ProcessingChannel *channel;
+} stream_info_t;
+
+class QCamera3HardwareInterface {
+public:
+    /* static variable and functions accessed by camera service */
+    static camera3_device_ops_t mCameraOps;
+    static int initialize(const struct camera3_device *,
+                const camera3_callback_ops_t *callback_ops);
+    static int configure_streams(const struct camera3_device *,
+                camera3_stream_configuration_t *stream_list);
+    static const camera_metadata_t* construct_default_request_settings(
+                                const struct camera3_device *, int type);
+    static int process_capture_request(const struct camera3_device *,
+                                camera3_capture_request_t *request);
+
+    static void dump(const struct camera3_device *, int fd);
+    static int flush(const struct camera3_device *);
+    static int close_camera_device(struct hw_device_t* device);
+
+public:
+    QCamera3HardwareInterface(uint32_t cameraId,
+            const camera_module_callbacks_t *callbacks);
+    virtual ~QCamera3HardwareInterface();
+    static void camEvtHandle(uint32_t camera_handle, mm_camera_event_t *evt,
+                                          void *user_data);
+    int openCamera(struct hw_device_t **hw_device);
+    camera_metadata_t* translateCapabilityToMetadata(int type);
+
+    static int getCamInfo(uint32_t cameraId, struct camera_info *info);
+    static int initCapabilities(uint32_t cameraId);
+    static int initStaticMetadata(uint32_t cameraId);
+    static void makeTable(cam_dimension_t *dimTable, size_t size,
+            size_t max_size, int32_t *sizeTable);
+    static void makeFPSTable(cam_fps_range_t *fpsTable, size_t size,
+            size_t max_size, int32_t *fpsRangesTable);
+    static void makeOverridesList(cam_scene_mode_overrides_t *overridesTable,
+            size_t size, size_t max_size, uint8_t *overridesList,
+            uint8_t *supported_indexes, uint32_t camera_id);
+    static size_t filterJpegSizes(int32_t *jpegSizes, int32_t *processedSizes,
+            size_t processedSizesCnt, size_t maxCount, cam_rect_t active_array_size,
+            uint8_t downscale_factor);
+    static void convertToRegions(cam_rect_t rect, int32_t* region, int weight);
+    static void convertFromRegions(cam_area_t &roi, const camera_metadata_t *settings,
+                                   uint32_t tag);
+    static bool resetIfNeededROI(cam_area_t* roi, const cam_crop_region_t* scalerCropRegion);
+    static void convertLandmarks(cam_face_detection_info_t face, int32_t* landmarks);
+    static int32_t getScalarFormat(int32_t format);
+    static int32_t getSensorSensitivity(int32_t iso_mode);
+
+    double computeNoiseModelEntryS(int32_t sensitivity);
+    double computeNoiseModelEntryO(int32_t sensitivity);
+
+    static void captureResultCb(mm_camera_super_buf_t *metadata,
+                camera3_stream_buffer_t *buffer, uint32_t frame_number,
+                bool isInputBuffer, void *userdata);
+
+    int initialize(const camera3_callback_ops_t *callback_ops);
+    int configureStreams(camera3_stream_configuration_t *stream_list);
+    int configureStreamsPerfLocked(camera3_stream_configuration_t *stream_list);
+    int processCaptureRequest(camera3_capture_request_t *request);
+    void dump(int fd);
+    int flush();
+
+    int setFrameParameters(camera3_capture_request_t *request,
+            cam_stream_ID_t streamID, int blob_request, uint32_t snapshotStreamId);
+    int32_t setReprocParameters(camera3_capture_request_t *request,
+            metadata_buffer_t *reprocParam, uint32_t snapshotStreamId);
+    int translateToHalMetadata(const camera3_capture_request_t *request,
+            metadata_buffer_t *parm, uint32_t snapshotStreamId);
+    camera_metadata_t* translateCbUrgentMetadataToResultMetadata (
+                             metadata_buffer_t *metadata);
+    camera_metadata_t* translateFromHalMetadata(metadata_buffer_t *metadata,
+                            nsecs_t timestamp, int32_t request_id,
+                            const CameraMetadata& jpegMetadata, uint8_t pipeline_depth,
+                            uint8_t capture_intent, bool pprocDone);
+    camera_metadata_t* saveRequestSettings(const CameraMetadata& jpegMetadata,
+                            camera3_capture_request_t *request);
+    int initParameters();
+    void deinitParameters();
+    QCamera3ReprocessChannel *addOfflineReprocChannel(const reprocess_config_t &config,
+            QCamera3ProcessingChannel *inputChHandle);
+    bool needRotationReprocess();
+    bool needReprocess(uint32_t postprocess_mask);
+    bool needJpegRotation();
+    cam_denoise_process_type_t getWaveletDenoiseProcessPlate();
+    cam_denoise_process_type_t getTemporalDenoiseProcessPlate();
+
+    void captureResultCb(mm_camera_super_buf_t *metadata,
+                camera3_stream_buffer_t *buffer, uint32_t frame_number,
+                bool isInputBuffer);
+    cam_dimension_t calcMaxJpegDim();
+    bool needOnlineRotation();
+    uint32_t getJpegQuality();
+    QCamera3Exif *getExifData();
+    mm_jpeg_exif_params_t get3AExifParams();
+    uint8_t getMobicatMask();
+    static void getFlashInfo(const int cameraId,
+            bool& hasFlash,
+            char (&flashNode)[QCAMERA_MAX_FILEPATH_LENGTH]);
+    const char *getEepromVersionInfo();
+    const uint32_t *getLdafCalib();
+    void get3AVersion(cam_q3a_version_t &swVersion);
+
+    template <typename fwkType, typename halType> struct QCameraMap {
+        fwkType fwk_name;
+        halType hal_name;
+    };
+
+    typedef struct {
+        const char *const desc;
+        cam_cds_mode_type_t val;
+    } QCameraPropMap;
+
+
+private:
+
+    int openCamera();
+    int closeCamera();
+    static size_t calcMaxJpegSize(uint32_t camera_id);
+    cam_dimension_t getMaxRawSize(uint32_t camera_id);
+    static void addStreamConfig(Vector<int32_t> &available_stream_configs,
+            int32_t scalar_format, const cam_dimension_t &dim,
+            int32_t config_type);
+
+    int validateCaptureRequest(camera3_capture_request_t *request);
+    int validateStreamDimensions(camera3_stream_configuration_t *streamList);
+    int validateStreamRotations(camera3_stream_configuration_t *streamList);
+    void deriveMinFrameDuration();
+    int32_t handlePendingReprocResults(uint32_t frame_number);
+    int64_t getMinFrameDuration(const camera3_capture_request_t *request);
+    void handleMetadataWithLock(mm_camera_super_buf_t *metadata_buf,
+            bool free_and_bufdone_meta_buf);
+    void handleBatchMetadata(mm_camera_super_buf_t *metadata_buf,
+            bool free_and_bufdone_meta_buf);
+    void handleBufferWithLock(camera3_stream_buffer_t *buffer,
+            uint32_t frame_number);
+    void handleInputBufferWithLock(uint32_t frame_number);
+    void unblockRequestIfNecessary();
+    void dumpMetadataToFile(tuning_params_t &meta, uint32_t &dumpFrameCount,
+            bool enabled, const char *type, uint32_t frameNumber);
+    static void getLogLevel();
+
+    void cleanAndSortStreamInfo();
+    void extractJpegMetadata(CameraMetadata& jpegMetadata,
+            const camera3_capture_request_t *request);
+
+    bool isSupportChannelNeeded(camera3_stream_configuration_t *streamList,
+            cam_stream_size_info_t stream_config_info);
+    int32_t setMobicat();
+
+    int32_t setHalFpsRange(const CameraMetadata &settings,
+            metadata_buffer_t *hal_metadata);
+    int32_t extractSceneMode(const CameraMetadata &frame_settings, uint8_t metaMode,
+            metadata_buffer_t *hal_metadata);
+    int32_t numOfSizesOnEncoder(const camera3_stream_configuration_t *streamList,
+            const cam_dimension_t &maxViewfinderSize);
+
+    void enablePowerHint();
+    void disablePowerHint();
+    int32_t getSensorOutputSize(cam_dimension_t &sensor_dim);
+    int32_t dynamicUpdateMetaStreamInfo();
+    int32_t startAllChannels();
+    int32_t stopAllChannels();
+    int32_t notifyErrorForPendingRequests();
+    int32_t getReprocessibleOutputStreamId(uint32_t &id);
+
+    bool isOnEncoder(const cam_dimension_t max_viewfinder_size,
+            uint32_t width, uint32_t height);
+    void hdrPlusPerfLock(mm_camera_super_buf_t *metadata_buf);
+
+    int32_t setBundleInfo();
+
+    camera3_device_t   mCameraDevice;
+    uint32_t           mCameraId;
+    mm_camera_vtbl_t  *mCameraHandle;
+    bool               mCameraOpened;
+    bool               mCameraInitialized;
+    camera_metadata_t *mDefaultMetadata[CAMERA3_TEMPLATE_COUNT];
+    const camera3_callback_ops_t *mCallbackOps;
+
+    QCamera3MetadataChannel *mMetadataChannel;
+    QCamera3PicChannel *mPictureChannel;
+    QCamera3RawChannel *mRawChannel;
+    QCamera3SupportChannel *mSupportChannel;
+    QCamera3SupportChannel *mAnalysisChannel;
+    QCamera3RawDumpChannel *mRawDumpChannel;
+    QCamera3RegularChannel *mDummyBatchChannel;
+    QCameraPerfLock m_perfLock;
+
+    uint32_t mChannelHandle;
+
+    void saveExifParams(metadata_buffer_t *metadata);
+    mm_jpeg_exif_params_t mExifParams;
+
+    //First request yet to be processed after configureStreams
+    bool mFirstRequest;
+    bool mFirstConfiguration;
+    bool mFlush;
+    bool mEnableRawDump;
+    QCamera3HeapMemory *mParamHeap;
+    metadata_buffer_t* mParameters;
+    metadata_buffer_t* mPrevParameters;
+    CameraMetadata mCurJpegMeta;
+    bool m_bIsVideo;
+    bool m_bIs4KVideo;
+    bool m_bEisSupportedSize;
+    bool m_bEisEnable;
+    typedef struct {
+        cam_dimension_t dim;
+        int format;
+        uint32_t usage;
+    } InputStreamInfo;
+
+    InputStreamInfo mInputStreamInfo;
+    uint8_t m_MobicatMask;
+    uint8_t mSupportedFaceDetectMode;
+    uint8_t m_bTnrEnabled;
+    uint8_t m_bTnrPreview;
+    uint8_t m_bTnrVideo;
+    cam_cds_mode_type_t m_CdsPreference;
+    /* Data structure to store pending request */
+    typedef struct {
+        camera3_stream_t *stream;
+        camera3_stream_buffer_t *buffer;
+        // metadata needs to be consumed by the corresponding stream
+        // in order to generate the buffer.
+        bool need_metadata;
+    } RequestedBufferInfo;
+    typedef struct {
+        uint32_t frame_number;
+        uint32_t num_buffers;
+        int32_t request_id;
+        List<RequestedBufferInfo> buffers;
+        int blob_request;
+        uint8_t bUrgentReceived;
+        nsecs_t timestamp;
+        camera3_stream_buffer_t *input_buffer;
+        const camera_metadata_t *settings;
+        CameraMetadata jpegMetadata;
+        uint8_t pipeline_depth;
+        uint32_t partial_result_cnt;
+        uint8_t capture_intent;
+        bool shutter_notified;
+    } PendingRequestInfo;
+    typedef struct {
+        uint32_t frame_number;
+        uint32_t stream_ID;
+    } PendingFrameDropInfo;
+
+    // Store the Pending buffers for Flushing
+    typedef struct {
+        // Frame number pertaining to the buffer
+        uint32_t frame_number;
+        camera3_stream_t *stream;
+        // Buffer handle
+        buffer_handle_t *buffer;
+
+    } PendingBufferInfo;
+
+    typedef struct {
+        // Total number of buffer requests pending
+        uint32_t num_buffers;
+        // List of pending buffers
+        List<PendingBufferInfo> mPendingBufferList;
+    } PendingBuffersMap;
+
+    typedef struct {
+        camera3_notify_msg_t notify_msg;
+        camera3_stream_buffer_t buffer;
+        uint32_t frame_number;
+    } PendingReprocessResult;
+
+    typedef KeyedVector<uint32_t, Vector<PendingBufferInfo> > FlushMap;
+    typedef List<QCamera3HardwareInterface::PendingRequestInfo>::iterator
+            pendingRequestIterator;
+    typedef List<QCamera3HardwareInterface::RequestedBufferInfo>::iterator
+            pendingBufferIterator;
+
+    List<PendingReprocessResult> mPendingReprocessResultList;
+    List<PendingRequestInfo> mPendingRequestsList;
+    List<PendingFrameDropInfo> mPendingFrameDropList;
+    /* Use last frame number of the batch as key and first frame number of the
+     * batch as value for that key */
+    KeyedVector<uint32_t, uint32_t> mPendingBatchMap;
+
+    PendingBuffersMap mPendingBuffersMap;
+    pthread_cond_t mRequestCond;
+    uint32_t mPendingLiveRequest;
+    bool mWokenUpByDaemon;
+    int32_t mCurrentRequestId;
+    cam_stream_size_info_t mStreamConfigInfo;
+
+    //mutex for serialized access to camera3_device_ops_t functions
+    pthread_mutex_t mMutex;
+
+    List<stream_info_t*> mStreamInfo;
+
+    int64_t mMinProcessedFrameDuration;
+    int64_t mMinJpegFrameDuration;
+    int64_t mMinRawFrameDuration;
+
+    uint32_t mMetaFrameCount;
+    bool    mUpdateDebugLevel;
+    const camera_module_callbacks_t *mCallbacks;
+
+    uint8_t mCaptureIntent;
+    metadata_buffer_t mReprocMeta; //scratch meta buffer
+    /* 0: Not batch, non-zero: Number of image buffers in a batch */
+    uint8_t mBatchSize;
+    // Used only in batch mode
+    uint8_t mToBeQueuedVidBufs;
+    // Fixed video fps
+    float mHFRVideoFps;
+    uint8_t mOpMode;
+    uint32_t mFirstFrameNumberInBatch;
+    camera3_stream_t mDummyBatchStream;
+    bool mNeedSensorRestart;
+
+    /* sensor output size with current stream configuration */
+    QCamera3CropRegionMapper mCropRegionMapper;
+
+    /* Ldaf calibration data */
+    bool mLdafCalibExist;
+    uint32_t mLdafCalib[2];
+    bool mPowerHintEnabled;
+    int32_t mLastCustIntentFrmNum;
+
+    static const QCameraMap<camera_metadata_enum_android_control_effect_mode_t,
+            cam_effect_mode_type> EFFECT_MODES_MAP[];
+    static const QCameraMap<camera_metadata_enum_android_control_awb_mode_t,
+            cam_wb_mode_type> WHITE_BALANCE_MODES_MAP[];
+    static const QCameraMap<camera_metadata_enum_android_control_scene_mode_t,
+            cam_scene_mode_type> SCENE_MODES_MAP[];
+    static const QCameraMap<camera_metadata_enum_android_control_af_mode_t,
+            cam_focus_mode_type> FOCUS_MODES_MAP[];
+    static const QCameraMap<camera_metadata_enum_android_color_correction_aberration_mode_t,
+            cam_aberration_mode_t> COLOR_ABERRATION_MAP[];
+    static const QCameraMap<camera_metadata_enum_android_control_ae_antibanding_mode_t,
+            cam_antibanding_mode_type> ANTIBANDING_MODES_MAP[];
+    static const QCameraMap<camera_metadata_enum_android_lens_state_t,
+            cam_af_lens_state_t> LENS_STATE_MAP[];
+    static const QCameraMap<camera_metadata_enum_android_control_ae_mode_t,
+            cam_flash_mode_t> AE_FLASH_MODE_MAP[];
+    static const QCameraMap<camera_metadata_enum_android_flash_mode_t,
+            cam_flash_mode_t> FLASH_MODES_MAP[];
+    static const QCameraMap<camera_metadata_enum_android_statistics_face_detect_mode_t,
+            cam_face_detect_mode_t> FACEDETECT_MODES_MAP[];
+    static const QCameraMap<camera_metadata_enum_android_lens_info_focus_distance_calibration_t,
+            cam_focus_calibration_t> FOCUS_CALIBRATION_MAP[];
+    static const QCameraMap<camera_metadata_enum_android_sensor_test_pattern_mode_t,
+            cam_test_pattern_mode_t> TEST_PATTERN_MAP[];
+    static const QCameraMap<camera_metadata_enum_android_sensor_reference_illuminant1_t,
+            cam_illuminat_t> REFERENCE_ILLUMINANT_MAP[];
+    static const QCameraMap<int32_t,
+            cam_hfr_mode_t> HFR_MODE_MAP[];
+
+    static const QCameraPropMap CDS_MAP[];
+
+    pendingRequestIterator erasePendingRequest(pendingRequestIterator i);
+};
+
+}; // namespace qcamera
+
+#endif /* __QCAMERA2HARDWAREINTERFACE_H__ */
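The QCameraMap template and the static *_MODES_MAP tables declared near the end of the class above are how the HAL translates framework control enums into mm-camera enums: each table is just an array of (framework value, HAL value) pairs that gets scanned linearly. A minimal sketch of that lookup pattern follows; the enum values and the lookupHalName() helper here are illustrative placeholders, not part of this patch.

    #include <cstddef>

    template <typename fwkType, typename halType> struct QCameraMap {
        fwkType fwk_name;
        halType hal_name;
    };

    // Placeholder enums standing in for the android_control_* and cam_* types.
    enum FwkEffect { FWK_EFFECT_OFF = 0, FWK_EFFECT_MONO = 1 };
    enum HalEffect { CAM_EFFECT_OFF = 0, CAM_EFFECT_MONO = 10 };

    static const QCameraMap<FwkEffect, HalEffect> kEffectMap[] = {
        { FWK_EFFECT_OFF,  CAM_EFFECT_OFF  },
        { FWK_EFFECT_MONO, CAM_EFFECT_MONO },
    };

    // Linear search from a framework value to its HAL counterpart.
    template <typename fwkType, typename halType>
    static halType lookupHalName(const QCameraMap<fwkType, halType> *map,
            size_t len, fwkType fwk, halType fallback)
    {
        for (size_t i = 0; i < len; i++) {
            if (map[i].fwk_name == fwk)
                return map[i].hal_name;
        }
        return fallback;
    }

A call such as lookupHalName(kEffectMap, sizeof(kEffectMap) / sizeof(kEffectMap[0]), FWK_EFFECT_MONO, CAM_EFFECT_OFF) then yields the HAL value to program into the metadata buffer.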
diff --git a/camera/QCamera2/HAL3/QCamera3Mem.cpp b/camera/QCamera2/HAL3/QCamera3Mem.cpp
new file mode 100644
index 0000000..d1135cc
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3Mem.cpp
@@ -0,0 +1,1195 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define LOG_TAG "QCameraHWI_Mem"
+
+#include <string.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <utils/Log.h>
+#include <utils/Errors.h>
+#include <gralloc_priv.h>
+#include <qdMetaData.h>
+#include "QCamera3Mem.h"
+#include "QCamera3HWI.h"
+
+extern "C" {
+#include <mm_camera_interface.h>
+}
+
+using namespace android;
+
+namespace qcamera {
+
+// QCamera3Memory base class
+
+/*===========================================================================
+ * FUNCTION   : QCamera3Memory
+ *
+ * DESCRIPTION: default constructor of QCamera3Memory
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCamera3Memory::QCamera3Memory()
+{
+    mBufferCount = 0;
+    for (int i = 0; i < MM_CAMERA_MAX_NUM_FRAMES; i++) {
+        mMemInfo[i].fd = -1;
+        mMemInfo[i].main_ion_fd = -1;
+        mMemInfo[i].handle = 0;
+        mMemInfo[i].size = 0;
+        mCurrentFrameNumbers[i] = -1;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCamera3Memory
+ *
+ * DESCRIPTION: destructor of QCamera3Memory
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCamera3Memory::~QCamera3Memory()
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : cacheOpsInternal
+ *
+ * DESCRIPTION: ion related memory cache operations
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *   @cmd     : cache ops command
+ *   @vaddr   : ptr to the virtual address
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3Memory::cacheOpsInternal(uint32_t index, unsigned int cmd, void *vaddr)
+{
+    Mutex::Autolock lock(mLock);
+
+    struct ion_flush_data cache_inv_data;
+    struct ion_custom_data custom_data;
+    int ret = OK;
+
+    if (MM_CAMERA_MAX_NUM_FRAMES <= index) {
+        ALOGE("%s: index %d out of bound [0, %d)",
+                __func__, index, MM_CAMERA_MAX_NUM_FRAMES);
+        return BAD_INDEX;
+    }
+
+    if (0 == mMemInfo[index].handle) {
+        ALOGE("%s: Buffer at %d not registered", __func__, index);
+        return BAD_INDEX;
+    }
+
+    memset(&cache_inv_data, 0, sizeof(cache_inv_data));
+    memset(&custom_data, 0, sizeof(custom_data));
+    cache_inv_data.vaddr = vaddr;
+    cache_inv_data.fd = mMemInfo[index].fd;
+    cache_inv_data.handle = mMemInfo[index].handle;
+    cache_inv_data.length = (unsigned int)mMemInfo[index].size;
+    custom_data.cmd = cmd;
+    custom_data.arg = (unsigned long)&cache_inv_data;
+
+    CDBG("%s: addr = %p, fd = %d, handle = %lx length = %d, ION Fd = %d",
+         __func__, cache_inv_data.vaddr, cache_inv_data.fd,
+         (unsigned long)cache_inv_data.handle, cache_inv_data.length,
+         mMemInfo[index].main_ion_fd);
+    ret = ioctl(mMemInfo[index].main_ion_fd, ION_IOC_CUSTOM, &custom_data);
+    if (ret < 0)
+        ALOGE("%s: Cache Invalidate failed: %s\n", __func__, strerror(errno));
+
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : getFd
+ *
+ * DESCRIPTION: return file descriptor of the indexed buffer
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : file descriptor
+ *==========================================================================*/
+int QCamera3Memory::getFd(uint32_t index)
+{
+    Mutex::Autolock lock(mLock);
+
+    if (MM_CAMERA_MAX_NUM_FRAMES <= index) {
+        return BAD_INDEX;
+    }
+
+    if (0 == mMemInfo[index].handle) {
+        return BAD_INDEX;
+    }
+
+    return mMemInfo[index].fd;
+}
+
+/*===========================================================================
+ * FUNCTION   : getSize
+ *
+ * DESCRIPTION: return buffer size of the indexed buffer
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : buffer size
+ *==========================================================================*/
+ssize_t QCamera3Memory::getSize(uint32_t index)
+{
+    Mutex::Autolock lock(mLock);
+
+    if (MM_CAMERA_MAX_NUM_FRAMES <= index) {
+        return BAD_INDEX;
+    }
+
+    if (0 == mMemInfo[index].handle) {
+        return BAD_INDEX;
+    }
+
+    return (ssize_t)mMemInfo[index].size;
+}
+
+/*===========================================================================
+ * FUNCTION   : getCnt
+ *
+ * DESCRIPTION: query number of buffers allocated
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : number of buffers allocated
+ *==========================================================================*/
+uint32_t QCamera3Memory::getCnt()
+{
+    Mutex::Autolock lock(mLock);
+
+    return mBufferCount;
+}
+
+/*===========================================================================
+ * FUNCTION   : getBufDef
+ *
+ * DESCRIPTION: query detailed buffer information
+ *
+ * PARAMETERS :
+ *   @offset  : [input] frame buffer offset
+ *   @bufDef  : [output] reference to struct to store buffer definition
+ *   @index   : [input] index of the buffer
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Memory::getBufDef(const cam_frame_len_offset_t &offset,
+        mm_camera_buf_def_t &bufDef, uint32_t index)
+{
+    Mutex::Autolock lock(mLock);
+
+    if (!mBufferCount) {
+        ALOGE("Memory not allocated");
+        return NO_INIT;
+    }
+
+    bufDef.fd = mMemInfo[index].fd;
+    bufDef.frame_len = mMemInfo[index].size;
+    bufDef.mem_info = (void *)this;
+    bufDef.planes_buf.num_planes = (int8_t)offset.num_planes;
+    bufDef.buffer = getPtrLocked(index);
+    bufDef.buf_idx = (uint8_t)index;
+
+    /* Plane 0 needs to be set separately. Set other planes in a loop */
+    bufDef.planes_buf.planes[0].length = offset.mp[0].len;
+    bufDef.planes_buf.planes[0].m.userptr = (long unsigned int)mMemInfo[index].fd;
+    bufDef.planes_buf.planes[0].data_offset = offset.mp[0].offset;
+    bufDef.planes_buf.planes[0].reserved[0] = 0;
+    for (int i = 1; i < bufDef.planes_buf.num_planes; i++) {
+         bufDef.planes_buf.planes[i].length = offset.mp[i].len;
+         bufDef.planes_buf.planes[i].m.userptr = (long unsigned int)mMemInfo[i].fd;
+         bufDef.planes_buf.planes[i].data_offset = offset.mp[i].offset;
+         bufDef.planes_buf.planes[i].reserved[0] =
+                 bufDef.planes_buf.planes[i-1].reserved[0] +
+                 bufDef.planes_buf.planes[i-1].length;
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : QCamera3HeapMemory
+ *
+ * DESCRIPTION: constructor of QCamera3HeapMemory for ion memory used internally in HAL
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCamera3HeapMemory::QCamera3HeapMemory(uint32_t maxCnt)
+    : QCamera3Memory()
+{
+    mMaxCnt = MIN(maxCnt, MM_CAMERA_MAX_NUM_FRAMES);
+    for (uint32_t i = 0; i < mMaxCnt; i ++)
+        mPtr[i] = NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCamera3HeapMemory
+ *
+ * DESCRIPTION: destructor of QCamera3HeapMemory
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCamera3HeapMemory::~QCamera3HeapMemory()
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : allocOneBuffer
+ *
+ * DESCRIPTION: allocate one buffer of a certain size
+ *
+ * PARAMETERS :
+ *   @memInfo : [output] reference to struct to store additional memory allocation info
+ *   @heap    : [input] heap id to indicate where the buffers will be allocated from
+ *   @size    : [input] length of the buffer to be allocated
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3HeapMemory::allocOneBuffer(QCamera3MemInfo &memInfo,
+        unsigned int heap_id, size_t size)
+{
+    int rc = OK;
+    struct ion_handle_data handle_data;
+    struct ion_allocation_data allocData;
+    struct ion_fd_data ion_info_fd;
+    int main_ion_fd = -1;
+
+    main_ion_fd = open("/dev/ion", O_RDONLY);
+    if (main_ion_fd < 0) {
+        ALOGE("Ion dev open failed: %s\n", strerror(errno));
+        goto ION_OPEN_FAILED;
+    }
+
+    memset(&allocData, 0, sizeof(allocData));
+    allocData.len = size;
+    /* to make it page size aligned */
+    allocData.len = (allocData.len + 4095U) & (~4095U);
+    allocData.align = 4096;
+    allocData.flags = ION_FLAG_CACHED;
+    allocData.heap_id_mask = heap_id;
+    rc = ioctl(main_ion_fd, ION_IOC_ALLOC, &allocData);
+    if (rc < 0) {
+        ALOGE("ION allocation for len %d failed: %s\n", allocData.len,
+            strerror(errno));
+        goto ION_ALLOC_FAILED;
+    }
+
+    memset(&ion_info_fd, 0, sizeof(ion_info_fd));
+    ion_info_fd.handle = allocData.handle;
+    rc = ioctl(main_ion_fd, ION_IOC_SHARE, &ion_info_fd);
+    if (rc < 0) {
+        ALOGE("ION map failed %s\n", strerror(errno));
+        goto ION_MAP_FAILED;
+    }
+
+    memInfo.main_ion_fd = main_ion_fd;
+    memInfo.fd = ion_info_fd.fd;
+    memInfo.handle = ion_info_fd.handle;
+    memInfo.size = allocData.len;
+    return OK;
+
+ION_MAP_FAILED:
+    memset(&handle_data, 0, sizeof(handle_data));
+    handle_data.handle = ion_info_fd.handle;
+    ioctl(main_ion_fd, ION_IOC_FREE, &handle_data);
+ION_ALLOC_FAILED:
+    close(main_ion_fd);
+ION_OPEN_FAILED:
+    return NO_MEMORY;
+}
+
+/*===========================================================================
+ * FUNCTION   : deallocOneBuffer
+ *
+ * DESCRIPTION: deallocate one buffer
+ *
+ * PARAMETERS :
+ *   @memInfo : reference to struct that stores additional memory allocation info
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera3HeapMemory::deallocOneBuffer(QCamera3MemInfo &memInfo)
+{
+    struct ion_handle_data handle_data;
+
+    if (memInfo.fd >= 0) {
+        close(memInfo.fd);
+        memInfo.fd = -1;
+    }
+
+    if (memInfo.main_ion_fd >= 0) {
+        memset(&handle_data, 0, sizeof(handle_data));
+        handle_data.handle = memInfo.handle;
+        ioctl(memInfo.main_ion_fd, ION_IOC_FREE, &handle_data);
+        close(memInfo.main_ion_fd);
+        memInfo.main_ion_fd = -1;
+    }
+    memInfo.handle = 0;
+    memInfo.size = 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : getPtrLocked
+ *
+ * DESCRIPTION: Return buffer pointer.
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : buffer ptr
+ *==========================================================================*/
+void *QCamera3HeapMemory::getPtrLocked(uint32_t index)
+{
+    if (index >= mBufferCount) {
+        ALOGE("index out of bound");
+        return (void *)BAD_INDEX;
+    }
+    return mPtr[index];
+}
+
+/*===========================================================================
+ * FUNCTION   : markFrameNumber
+ *
+ * DESCRIPTION: We use this function from the request call path to mark the
+ *              buffers with the frame number they are intended for. This info
+ *              is used later when giving out the callback, and it is the duty
+ *              of the PP to ensure that data for that particular
+ *              frameNumber/Request is written to this buffer.
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *   @frame#  : Frame number from the framework
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3HeapMemory::markFrameNumber(uint32_t index, uint32_t frameNumber)
+{
+    Mutex::Autolock lock(mLock);
+
+    if (index >= mBufferCount) {
+        ALOGE("%s: Index %d out of bounds, current buffer count is %d",
+                __func__, index, mBufferCount);
+        return BAD_INDEX;
+    }
+
+    if (0 == mMemInfo[index].handle) {
+        ALOGE("%s: Buffer at %d not allocated", __func__, index);
+        return BAD_INDEX;
+    }
+
+    mCurrentFrameNumbers[index] = (int32_t)frameNumber;
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : getFrameNumber
+ *
+ * DESCRIPTION: We use this to fetch the frameNumber for the request with which
+ *              this buffer was given to HAL
+ *
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : int32_t frameNumber
+ *              positive/zero  -- success
+ *              negative failure
+ *==========================================================================*/
+int32_t QCamera3HeapMemory::getFrameNumber(uint32_t index)
+{
+    Mutex::Autolock lock(mLock);
+
+    if (index >= mBufferCount) {
+        ALOGE("%s: Index %d out of bounds, current buffer count is %d",
+                __func__, index, mBufferCount);
+        return -1;
+    }
+
+    if (0 == mMemInfo[index].handle) {
+        ALOGE("%s: Buffer at %d not registered", __func__, index);
+        return -1;
+    }
+
+    return mCurrentFrameNumbers[index];
+}
+
+/*===========================================================================
+ * FUNCTION   : getBufferIndex
+ *
+ * DESCRIPTION: We use this to fetch the buffer index for the request with
+ *              a particular frame number
+ *
+ *
+ * PARAMETERS :
+ *   @frameNumber  : frame number of the buffer
+ *
+ * RETURN     : int32_t buffer index
+ *              negative failure
+ *==========================================================================*/
+int32_t QCamera3HeapMemory::getBufferIndex(uint32_t frameNumber)
+{
+    Mutex::Autolock lock(mLock);
+
+    for (uint32_t index = 0;
+            index < mBufferCount; index++) {
+        if (mMemInfo[index].handle &&
+                mCurrentFrameNumbers[index] == (int32_t)frameNumber)
+            return (int32_t)index;
+    }
+    return -1;
+}
+
+/*===========================================================================
+ * FUNCTION   : getPtr
+ *
+ * DESCRIPTION: Return buffer pointer
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : buffer ptr
+ *==========================================================================*/
+void *QCamera3HeapMemory::getPtr(uint32_t index)
+{
+    return getPtrLocked(index);
+}
+
+/*===========================================================================
+ * FUNCTION   : allocate
+ *
+ * DESCRIPTION: allocate requested number of buffers of certain size
+ *
+ * PARAMETERS :
+ *   @size    : length of the buffer to be allocated
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3HeapMemory::allocate(size_t size)
+{
+    unsigned int heap_id_mask = 0x1 << ION_IOMMU_HEAP_ID;
+    uint32_t i;
+    int rc = NO_ERROR;
+
+    //Note that allocate() is a one-shot allocation of all mMaxCnt buffers.
+    //Incremental allocation (multiple calls) is only supported through
+    //allocateOne().
+    if (mBufferCount > 0) {
+        ALOGE("%s: Buffers have already been allocated.", __func__);
+        return BAD_INDEX;
+    }
+
+    for (i = 0; i < mMaxCnt; i ++) {
+        rc = allocOneBuffer(mMemInfo[i], heap_id_mask, size);
+        if (rc < 0) {
+            ALOGE("AllocateIonMemory failed");
+            goto ALLOC_FAILED;
+        }
+
+        void *vaddr = mmap(NULL,
+                    mMemInfo[i].size,
+                    PROT_READ | PROT_WRITE,
+                    MAP_SHARED,
+                    mMemInfo[i].fd, 0);
+        if (vaddr == MAP_FAILED) {
+            deallocOneBuffer(mMemInfo[i]);
+            ALOGE("%s: mmap failed for buffer %d", __func__, i);
+            goto ALLOC_FAILED;
+        } else
+            mPtr[i] = vaddr;
+    }
+    if (rc == 0)
+        mBufferCount = mMaxCnt;
+
+    return OK;
+
+ALLOC_FAILED:
+    for (uint32_t j = 0; j < i; j++) {
+        munmap(mPtr[j], mMemInfo[j].size);
+        mPtr[j] = NULL;
+        deallocOneBuffer(mMemInfo[j]);
+    }
+    return NO_MEMORY;
+}
+
+/*===========================================================================
+ * FUNCTION   : allocateOne
+ *
+ * DESCRIPTION: allocate one buffer
+ *
+ * PARAMETERS :
+ *   @size    : length of the buffer to be allocated
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3HeapMemory::allocateOne(size_t size)
+{
+    unsigned int heap_id_mask = 0x1 << ION_IOMMU_HEAP_ID;
+    uint32_t i;
+    int rc = NO_ERROR;
+
+    //Note that now we allow incremental allocation. In other words, we allow
+    //multiple alloc being called as long as the sum of count does not exceed
+    //mMaxCnt.
+    if (mBufferCount + 1 > mMaxCnt) {
+        ALOGE("Buffer count %d + 1 out of bound. Max is %d",
+                mBufferCount, mMaxCnt);
+        return BAD_INDEX;
+    }
+
+    rc = allocOneBuffer(mMemInfo[mBufferCount], heap_id_mask, size);
+    if (rc < 0) {
+        ALOGE("AllocateIonMemory failed");
+        return NO_MEMORY;
+    }
+
+    void *vaddr = mmap(NULL,
+                mMemInfo[mBufferCount].size,
+                PROT_READ | PROT_WRITE,
+                MAP_SHARED,
+                mMemInfo[mBufferCount].fd, 0);
+    if (vaddr == MAP_FAILED) {
+        deallocOneBuffer(mMemInfo[mBufferCount]);
+        ALOGE("%s: mmap failed for buffer", __func__);
+        return NO_MEMORY;
+    } else
+        mPtr[mBufferCount] = vaddr;
+
+    if (rc == 0)
+        mBufferCount += 1;
+
+    return mBufferCount-1;
+}
+
+/*===========================================================================
+ * FUNCTION   : deallocate
+ *
+ * DESCRIPTION: deallocate buffers
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera3HeapMemory::deallocate()
+{
+    for (uint32_t i = 0; i < mBufferCount; i++) {
+        munmap(mPtr[i], mMemInfo[i].size);
+        mPtr[i] = NULL;
+        deallocOneBuffer(mMemInfo[i]);
+        mCurrentFrameNumbers[i] = -1;
+    }
+    mBufferCount = 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : cacheOps
+ *
+ * DESCRIPTION: ion related memory cache operations
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *   @cmd     : cache ops command
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3HeapMemory::cacheOps(uint32_t index, unsigned int cmd)
+{
+    if (index >= mBufferCount)
+        return BAD_INDEX;
+    return cacheOpsInternal(index, cmd, mPtr[index]);
+}
+
+/*===========================================================================
+ * FUNCTION   : getMatchBufIndex
+ *
+ * DESCRIPTION: query buffer index by object ptr
+ *
+ * PARAMETERS :
+ *   @object  : object ptr
+ *
+ * RETURN     : buffer index if match found,
+ *              -1 if failed
+ *==========================================================================*/
+int QCamera3HeapMemory::getMatchBufIndex(void * /*object*/)
+{
+
+/*
+    TODO for HEAP memory type, would there be an equivalent requirement?
+
+    int index = -1;
+    buffer_handle_t *key = (buffer_handle_t*) object;
+    if (!key) {
+        return BAD_VALUE;
+    }
+    for (int i = 0; i < mBufferCount; i++) {
+        if (mBufferHandle[i] == key) {
+            index = i;
+            break;
+        }
+    }
+    return index;
+*/
+    ALOGE("%s: FATAL: Not supposed to come here", __func__);
+    return -1;
+}
+
+/*===========================================================================
+ * FUNCTION   : QCamera3GrallocMemory
+ *
+ * DESCRIPTION: constructor of QCamera3GrallocMemory
+ *              preview stream buffers are allocated from gralloc native_window
+ *
+ * PARAMETERS :
+ *   @startIdx : start index of the array from which buffers can be registered
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCamera3GrallocMemory::QCamera3GrallocMemory(uint32_t startIdx)
+        : QCamera3Memory(), mStartIdx(startIdx)
+{
+    for (int i = 0; i < MM_CAMERA_MAX_NUM_FRAMES; i ++) {
+        mBufferHandle[i] = NULL;
+        mPrivateHandle[i] = NULL;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCamera3GrallocMemory
+ *
+ * DESCRIPTION: destructor of QCamera3GrallocMemory
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+QCamera3GrallocMemory::~QCamera3GrallocMemory()
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : registerBuffer
+ *
+ * DESCRIPTION: registers a framework-allocated gralloc buffer_handle_t
+ *
+ * PARAMETERS :
+ *   @buffers : buffer_handle_t pointer
+ *   @type :    cam_stream_type_t
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3GrallocMemory::registerBuffer(buffer_handle_t *buffer,
+        cam_stream_type_t type)
+{
+    status_t ret = NO_ERROR;
+    struct ion_fd_data ion_info_fd;
+    void *vaddr = NULL;
+    int32_t colorSpace = ITU_R_601_FR;
+    int32_t idx = -1;
+
+    CDBG("%s: E", __func__);
+
+    memset(&ion_info_fd, 0, sizeof(ion_info_fd));
+
+    if (0 <= getMatchBufIndex((void *) buffer)) {
+        ALOGV("%s: Buffer already registered", __func__);
+        return ALREADY_EXISTS;
+    }
+
+    Mutex::Autolock lock(mLock);
+    if (mBufferCount >= (MM_CAMERA_MAX_NUM_FRAMES - 1 - mStartIdx)) {
+        ALOGE("%s: Number of buffers %d greater than what's supported %d",
+                __func__, mBufferCount, MM_CAMERA_MAX_NUM_FRAMES - mStartIdx);
+        return BAD_INDEX;
+    }
+
+    idx = getFreeIndexLocked();
+    if (0 > idx) {
+        ALOGE("%s: No available memory slots", __func__);
+        return BAD_INDEX;
+    }
+
+    mBufferHandle[idx] = buffer;
+    mPrivateHandle[idx] = (struct private_handle_t *)(*mBufferHandle[idx]);
+
+    setMetaData(mPrivateHandle[idx], UPDATE_COLOR_SPACE, &colorSpace);
+
+    mMemInfo[idx].main_ion_fd = open("/dev/ion", O_RDONLY);
+    if (mMemInfo[idx].main_ion_fd < 0) {
+        ALOGE("%s: failed: could not open ion device", __func__);
+        ret = NO_MEMORY;
+        goto end;
+    } else {
+        ion_info_fd.fd = mPrivateHandle[idx]->fd;
+        if (ioctl(mMemInfo[idx].main_ion_fd,
+                  ION_IOC_IMPORT, &ion_info_fd) < 0) {
+            ALOGE("%s: ION import failed\n", __func__);
+            close(mMemInfo[idx].main_ion_fd);
+            ret = NO_MEMORY;
+            goto end;
+        }
+    }
+    CDBG("%s: idx = %d, fd = %d, size = %d, offset = %d",
+            __func__, idx, mPrivateHandle[idx]->fd,
+            mPrivateHandle[idx]->size,
+            mPrivateHandle[idx]->offset);
+    mMemInfo[idx].fd = mPrivateHandle[idx]->fd;
+    mMemInfo[idx].size =
+            ( /* FIXME: Should update ION interface */ size_t)
+            mPrivateHandle[idx]->size;
+    mMemInfo[idx].handle = ion_info_fd.handle;
+
+    vaddr = mmap(NULL,
+            mMemInfo[idx].size,
+            PROT_READ | PROT_WRITE,
+            MAP_SHARED,
+            mMemInfo[idx].fd, 0);
+    if (vaddr == MAP_FAILED) {
+        mMemInfo[idx].handle = 0;
+        ret = NO_MEMORY;
+    } else {
+        mPtr[idx] = vaddr;
+        mBufferCount++;
+    }
+
+end:
+    CDBG(" %s : X ",__func__);
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : unregisterBufferLocked
+ *
+ * DESCRIPTION: Unregister buffer. Please note that this method has to be
+ *              called with 'mLock' acquired.
+ *
+ * PARAMETERS :
+ *   @idx     : unregister buffer at index 'idx'
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3GrallocMemory::unregisterBufferLocked(size_t idx)
+{
+    munmap(mPtr[idx], mMemInfo[idx].size);
+    mPtr[idx] = NULL;
+
+    struct ion_handle_data ion_handle;
+    memset(&ion_handle, 0, sizeof(ion_handle));
+    ion_handle.handle = mMemInfo[idx].handle;
+    if (ioctl(mMemInfo[idx].main_ion_fd, ION_IOC_FREE, &ion_handle) < 0) {
+        ALOGE("ion free failed");
+    }
+    close(mMemInfo[idx].main_ion_fd);
+    memset(&mMemInfo[idx], 0, sizeof(struct QCamera3MemInfo));
+    mMemInfo[idx].main_ion_fd = -1;
+    mBufferHandle[idx] = NULL;
+    mPrivateHandle[idx] = NULL;
+    mCurrentFrameNumbers[idx] = -1;
+    mBufferCount--;
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : unregisterBuffer
+ *
+ * DESCRIPTION: unregister buffer
+ *
+ * PARAMETERS :
+ *   @idx     : unregister buffer at index 'idx'
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3GrallocMemory::unregisterBuffer(size_t idx)
+{
+    int32_t rc = NO_ERROR;
+    Mutex::Autolock lock(mLock);
+
+    CDBG("%s: E ", __FUNCTION__);
+
+    if (MM_CAMERA_MAX_NUM_FRAMES <= idx) {
+        ALOGE("%s: Buffer index %d greater than what is supported %d",
+                __func__, idx, MM_CAMERA_MAX_NUM_FRAMES);
+        return BAD_VALUE;
+    }
+    if (idx < mStartIdx) {
+        ALOGE("%s: buffer index %d less than starting index %d",
+                __func__, idx, mStartIdx);
+        return BAD_INDEX;
+    }
+
+    if (0 == mMemInfo[idx].handle) {
+        ALOGE("%s: Trying to unregister buffer at %d which is not registered",
+                __func__, idx);
+        return BAD_VALUE;
+    }
+
+    rc = unregisterBufferLocked(idx);
+
+    CDBG(" %s : X ",__FUNCTION__);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : unregisterBuffers
+ *
+ * DESCRIPTION: unregister buffers
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera3GrallocMemory::unregisterBuffers()
+{
+    int err = NO_ERROR;
+    Mutex::Autolock lock(mLock);
+
+    CDBG("%s: E ", __FUNCTION__);
+
+    for (uint32_t cnt = mStartIdx; cnt < MM_CAMERA_MAX_NUM_FRAMES; cnt++) {
+        if (0 == mMemInfo[cnt].handle) {
+            continue;
+        }
+        err = unregisterBufferLocked(cnt);
+        if (NO_ERROR != err) {
+            ALOGE("%s: Error unregistering buffer %d error %d",
+                    __func__, cnt, err);
+        }
+    }
+    mBufferCount = 0;
+    CDBG(" %s : X ",__FUNCTION__);
+}
+
+/*===========================================================================
+ * FUNCTION   : markFrameNumber
+ *
+ * DESCRIPTION: We use this function from the request call path to mark the
+ *              buffers with the frame number they are intended for. This info
+ *              is used later when giving out the callback, and it is the duty
+ *              of the PP to ensure that data for that particular
+ *              frameNumber/Request is written to this buffer.
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *   @frame#  : Frame number from the framework
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3GrallocMemory::markFrameNumber(uint32_t index, uint32_t frameNumber)
+{
+    Mutex::Autolock lock(mLock);
+
+    if (index >= MM_CAMERA_MAX_NUM_FRAMES) {
+        ALOGE("%s: Index out of bounds", __func__);
+        return BAD_INDEX;
+    }
+    if (index < mStartIdx) {
+        ALOGE("%s: buffer index %d less than starting index %d",
+                __func__, index, mStartIdx);
+        return BAD_INDEX;
+    }
+
+    if (0 == mMemInfo[index].handle) {
+        ALOGE("%s: Buffer at %d not registered", __func__, index);
+        return BAD_INDEX;
+    }
+
+    mCurrentFrameNumbers[index] = (int32_t)frameNumber;
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : getFrameNumber
+ *
+ * DESCRIPTION: We use this to fetch the frameNumber for the request with which
+ *              this buffer was given to HAL
+ *
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : int32_t frameNumber
+ *              positive/zero  -- success
+ *              negative failure
+ *==========================================================================*/
+int32_t QCamera3GrallocMemory::getFrameNumber(uint32_t index)
+{
+    Mutex::Autolock lock(mLock);
+
+    if (index >= MM_CAMERA_MAX_NUM_FRAMES) {
+        ALOGE("%s: Index out of bounds", __func__);
+        return -1;
+    }
+    if (index < mStartIdx) {
+        ALOGE("%s: buffer index %d less than starting index %d",
+                __func__, index, mStartIdx);
+        return BAD_INDEX;
+    }
+
+    if (0 == mMemInfo[index].handle) {
+        ALOGE("%s: Buffer at %d not registered", __func__, index);
+        return -1;
+    }
+
+    return mCurrentFrameNumbers[index];
+}
+
+/*===========================================================================
+ * FUNCTION   : getBufferIndex
+ *
+ * DESCRIPTION: We use this to fetch the buffer index for the request with
+ *              a particular frame number
+ *
+ *
+ * PARAMETERS :
+ *   @frameNumber  : frame number of the buffer
+ *
+ * RETURN     : int32_t buffer index
+ *              negative failure
+ *==========================================================================*/
+int32_t QCamera3GrallocMemory::getBufferIndex(uint32_t frameNumber)
+{
+    for (uint32_t index = mStartIdx;
+            index < MM_CAMERA_MAX_NUM_FRAMES; index++) {
+        if (mMemInfo[index].handle &&
+                mCurrentFrameNumbers[index] == (int32_t)frameNumber)
+            return (int32_t)index;
+    }
+    return -1;
+}
+
+/*===========================================================================
+ * FUNCTION   : cacheOps
+ *
+ * DESCRIPTION: ion related memory cache operations
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *   @cmd     : cache ops command
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3GrallocMemory::cacheOps(uint32_t index, unsigned int cmd)
+{
+    if (index >= MM_CAMERA_MAX_NUM_FRAMES) {
+        ALOGE("%s: Index out of bounds", __func__);
+        return -1;
+    }
+    if (index < mStartIdx) {
+        ALOGE("%s: buffer index %d less than starting index %d",
+                __func__, index, mStartIdx);
+        return BAD_INDEX;
+    }
+
+    return cacheOpsInternal(index, cmd, mPtr[index]);
+}
+
+/*===========================================================================
+ * FUNCTION   : getMatchBufIndex
+ *
+ * DESCRIPTION: query buffer index by object ptr
+ *
+ * PARAMETERS :
+ *   @object  : object ptr
+ *
+ * RETURN     : buffer index if match found,
+ *              -1 if failed
+ *==========================================================================*/
+int QCamera3GrallocMemory::getMatchBufIndex(void *object)
+{
+    Mutex::Autolock lock(mLock);
+
+    int index = -1;
+    buffer_handle_t *key = (buffer_handle_t*) object;
+    if (!key) {
+        return BAD_VALUE;
+    }
+    for (uint32_t i = mStartIdx; i < MM_CAMERA_MAX_NUM_FRAMES; i++) {
+        if (mBufferHandle[i] == key) {
+            index = (int)i;
+            break;
+        }
+    }
+
+    return index;
+}
+
+/*===========================================================================
+ * FUNCTION   : getFreeIndexLocked
+ *
+ * DESCRIPTION: Find free index slot. Note 'mLock' needs to be acquired
+ *              before calling this method.
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : free buffer index if found,
+ *              -1 if failed
+ *==========================================================================*/
+int QCamera3GrallocMemory::getFreeIndexLocked()
+{
+    int index = -1;
+
+    if (mBufferCount >= (MM_CAMERA_MAX_NUM_FRAMES - 1)) {
+        ALOGE("%s: Number of buffers %d greater than what's supported %d",
+            __func__, mBufferCount, MM_CAMERA_MAX_NUM_FRAMES);
+        return index;
+    }
+
+    for (size_t i = mStartIdx; i < MM_CAMERA_MAX_NUM_FRAMES; i++) {
+        if (0 == mMemInfo[i].handle) {
+            index = i;
+            break;
+        }
+    }
+
+    return index;
+}
+
+/*===========================================================================
+ * FUNCTION   : getPtrLocked
+ *
+ * DESCRIPTION: Return buffer pointer. Please note 'mLock' must be acquired
+ *              before calling this method.
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : buffer ptr
+ *==========================================================================*/
+void *QCamera3GrallocMemory::getPtrLocked(uint32_t index)
+{
+    if (MM_CAMERA_MAX_NUM_FRAMES <= index) {
+        ALOGE("%s: index %d out of bound [0, %d)",
+                __func__, index, MM_CAMERA_MAX_NUM_FRAMES);
+        return NULL;
+    }
+    if (index < mStartIdx) {
+        ALOGE("%s: buffer index %d less than starting index %d",
+                __func__, index, mStartIdx);
+        return NULL;
+    }
+
+
+    if (0 == mMemInfo[index].handle) {
+        ALOGE("%s: Buffer at %d not registered", __func__, index);
+        return NULL;
+    }
+
+    return mPtr[index];
+}
+
+/*===========================================================================
+ * FUNCTION   : getPtr
+ *
+ * DESCRIPTION: Return buffer pointer.
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : buffer ptr
+ *==========================================================================*/
+void *QCamera3GrallocMemory::getPtr(uint32_t index)
+{
+    Mutex::Autolock lock(mLock);
+    return getPtrLocked(index);
+}
+
+/*===========================================================================
+ * FUNCTION   : getBufferHandle
+ *
+ * DESCRIPTION: return framework pointer
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : buffer ptr if match found
+ *              NULL if failed
+ *==========================================================================*/
+void *QCamera3GrallocMemory::getBufferHandle(uint32_t index)
+{
+    Mutex::Autolock lock(mLock);
+
+    if (MM_CAMERA_MAX_NUM_FRAMES <= index) {
+        ALOGE("%s: index %d out of bound [0, %d)",
+                __func__, index, MM_CAMERA_MAX_NUM_FRAMES);
+        return NULL;
+    }
+    if (index < mStartIdx) {
+        ALOGE("%s: buffer index %d less than starting index %d",
+                __func__, index, mStartIdx);
+        return NULL;
+    }
+
+    if (0 == mMemInfo[index].handle) {
+        ALOGE("%s: Buffer at %d not registered", __func__, index);
+        return NULL;
+    }
+
+    return mBufferHandle[index];
+}
+}; //namespace qcamera
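QCamera3GrallocMemory above wraps buffers that the framework allocates: registerBuffer() imports the gralloc handle into ION and maps it, markFrameNumber() ties a registered slot to the capture request it will serve, and getBufferIndex() recovers that slot when the result for that frame comes back. A condensed sketch of the intended call sequence is below; 'handle', 'frameNumber' and the preview stream type are illustrative stand-ins, and error handling is omitted.

    QCamera3GrallocMemory mem(0 /* startIdx */);

    // At stream configuration: import the gralloc handle into ION and mmap it.
    mem.registerBuffer(handle, CAM_STREAM_TYPE_PREVIEW);

    // At request time: remember which capture request this slot will serve.
    int idx = mem.getMatchBufIndex((void *)handle);
    if (idx >= 0)
        mem.markFrameNumber((uint32_t)idx, frameNumber);

    // At result time: map the frame number back to the slot holding its data,
    // and perform ION cache maintenance before returning it to the framework.
    int32_t slot = mem.getBufferIndex(frameNumber);
    if (slot >= 0)
        mem.cleanInvalidateCache((uint32_t)slot);

    // At stream teardown: drop every mapping and ION import in one call.
    mem.unregisterBuffers();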
diff --git a/camera/QCamera2/HAL3/QCamera3Mem.h b/camera/QCamera2/HAL3/QCamera3Mem.h
new file mode 100644
index 0000000..317b20c
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3Mem.h
@@ -0,0 +1,156 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCAMERA3HWI_MEM_H__
+#define __QCAMERA3HWI_MEM_H__
+#include <hardware/camera3.h>
+#include <utils/Mutex.h>
+
+extern "C" {
+#include <sys/types.h>
+#include <linux/msm_ion.h>
+#include <mm_camera_interface.h>
+}
+
+using namespace android;
+
+namespace qcamera {
+
+// Base class for all memory types. Abstract.
+class QCamera3Memory {
+
+public:
+    int cleanCache(uint32_t index)
+    {
+        return cacheOps(index, ION_IOC_CLEAN_CACHES);
+    }
+    int invalidateCache(uint32_t index)
+    {
+        return cacheOps(index, ION_IOC_INV_CACHES);
+    }
+    int cleanInvalidateCache(uint32_t index)
+    {
+        return cacheOps(index, ION_IOC_CLEAN_INV_CACHES);
+    }
+    int getFd(uint32_t index);
+    ssize_t getSize(uint32_t index);
+    uint32_t getCnt();
+
+    virtual int cacheOps(uint32_t index, unsigned int cmd) = 0;
+    virtual int getMatchBufIndex(void *object) = 0;
+    virtual void *getPtr(uint32_t index) = 0;
+
+    virtual int32_t markFrameNumber(uint32_t index, uint32_t frameNumber) = 0;
+    virtual int32_t getFrameNumber(uint32_t index) = 0;
+    virtual int32_t getBufferIndex(uint32_t frameNumber) = 0;
+
+    QCamera3Memory();
+    virtual ~QCamera3Memory();
+
+    int32_t getBufDef(const cam_frame_len_offset_t &offset,
+            mm_camera_buf_def_t &bufDef, uint32_t index);
+
+protected:
+    struct QCamera3MemInfo {
+        int fd;
+        int main_ion_fd;
+        ion_user_handle_t handle;
+        size_t size;
+    };
+
+    int cacheOpsInternal(uint32_t index, unsigned int cmd, void *vaddr);
+    virtual void *getPtrLocked(uint32_t index) = 0;
+
+    uint32_t mBufferCount;
+    struct QCamera3MemInfo mMemInfo[MM_CAMERA_MAX_NUM_FRAMES];
+    void *mPtr[MM_CAMERA_MAX_NUM_FRAMES];
+    int32_t mCurrentFrameNumbers[MM_CAMERA_MAX_NUM_FRAMES];
+    Mutex mLock;
+};
+
+// Internal heap memory is used for memories used internally
+// They are allocated from /dev/ion. Examples are: capabilities,
+// parameters, metadata, and internal YUV data for jpeg encoding.
+class QCamera3HeapMemory : public QCamera3Memory {
+public:
+    QCamera3HeapMemory(uint32_t maxCnt);
+    virtual ~QCamera3HeapMemory();
+
+    int allocate(size_t size);
+    int allocateOne(size_t size);
+    void deallocate();
+
+    virtual int cacheOps(uint32_t index, unsigned int cmd);
+    virtual int getMatchBufIndex(void *object);
+    virtual void *getPtr(uint32_t index);
+
+    virtual int32_t markFrameNumber(uint32_t index, uint32_t frameNumber);
+    virtual int32_t getFrameNumber(uint32_t index);
+    virtual int32_t getBufferIndex(uint32_t frameNumber);
+
+protected:
+    virtual void *getPtrLocked(uint32_t index);
+private:
+    int allocOneBuffer(struct QCamera3MemInfo &memInfo,
+            unsigned int heap_id, size_t size);
+    void deallocOneBuffer(struct QCamera3MemInfo &memInfo);
+    bool mQueueAll;
+    uint32_t mMaxCnt;
+};
+
+// Gralloc Memory shared with frameworks
+class QCamera3GrallocMemory : public QCamera3Memory {
+public:
+    QCamera3GrallocMemory(uint32_t startIdx);
+    virtual ~QCamera3GrallocMemory();
+
+    int registerBuffer(buffer_handle_t *buffer, cam_stream_type_t type);
+    int32_t unregisterBuffer(size_t idx);
+    void unregisterBuffers();
+    virtual int cacheOps(uint32_t index, unsigned int cmd);
+    virtual int getMatchBufIndex(void *object);
+    virtual void *getPtr(uint32_t index);
+
+    virtual int32_t markFrameNumber(uint32_t index, uint32_t frameNumber);
+    virtual int32_t getFrameNumber(uint32_t index);
+    virtual int32_t getBufferIndex(uint32_t frameNumber);
+
+    void *getBufferHandle(uint32_t index);
+protected:
+    virtual void *getPtrLocked(uint32_t index);
+private:
+    int32_t unregisterBufferLocked(size_t idx);
+    int32_t getFreeIndexLocked();
+    buffer_handle_t *mBufferHandle[MM_CAMERA_MAX_NUM_FRAMES];
+    struct private_handle_t *mPrivateHandle[MM_CAMERA_MAX_NUM_FRAMES];
+
+    uint32_t mStartIdx;
+};
+};
+#endif
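QCamera3HeapMemory, by contrast, owns its ION allocations and is used for HAL-internal buffers such as parameters and metadata. A minimal lifecycle sketch against the interface above, assuming the HAL headers are in scope and using illustrative sizes, would look like this:

    const uint32_t kNumBufs = 4;
    const size_t   kBufSize = 64 * 1024;      // placeholder, e.g. a parameter blob

    QCamera3HeapMemory heap(kNumBufs);        // capped at MM_CAMERA_MAX_NUM_FRAMES
    if (heap.allocate(kBufSize) == OK) {      // one-shot ION alloc + mmap of all buffers
        void *ptr = heap.getPtr(0);           // CPU-visible mapping of buffer 0
        // ... fill the buffer, then flush CPU writes before sharing the fd ...
        heap.cleanCache(0);

        // Describe the plane layout to mm-camera-interface consumers; the
        // offset struct is normally filled in by the owning stream.
        mm_camera_buf_def_t bufDef;
        cam_frame_len_offset_t offset;
        memset(&offset, 0, sizeof(offset));
        offset.num_planes = 1;
        offset.mp[0].len = kBufSize;
        heap.getBufDef(offset, bufDef, 0);

        heap.deallocate();                    // munmap + ION free of every buffer
    }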
diff --git a/camera/QCamera2/HAL3/QCamera3PostProc.cpp b/camera/QCamera2/HAL3/QCamera3PostProc.cpp
new file mode 100755
index 0000000..d714d87
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3PostProc.cpp
@@ -0,0 +1,2744 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+
+#define LOG_TAG "QCamera3PostProc"
+//#define LOG_NDEBUG 0
+
+#include <stdlib.h>
+#include <utils/Errors.h>
+#include <utils/Trace.h>
+#include <cutils/properties.h>
+
+#include "QCamera3PostProc.h"
+#include "QCamera3HWI.h"
+#include "QCamera3Channel.h"
+#include "QCamera3Stream.h"
+
+namespace qcamera {
+
+static const char ExifAsciiPrefix[] =
+    { 0x41, 0x53, 0x43, 0x49, 0x49, 0x0, 0x0, 0x0 };          // "ASCII\0\0\0"
+static const char ExifUndefinedPrefix[] =
+    { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };   // "\0\0\0\0\0\0\0\0"
+
+#define EXIF_ASCII_PREFIX_SIZE           8   //(sizeof(ExifAsciiPrefix))
+#define FOCAL_LENGTH_DECIMAL_PRECISION   1000
+
+/*===========================================================================
+ * FUNCTION   : QCamera3PostProcessor
+ *
+ * DESCRIPTION: constructor of QCamera3PostProcessor.
+ *
+ * PARAMETERS :
+ *   @cam_ctrl : ptr to HWI object
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCamera3PostProcessor::QCamera3PostProcessor(QCamera3ProcessingChannel* ch_ctrl)
+    : m_parent(ch_ctrl),
+      mJpegCB(NULL),
+      mJpegUserData(NULL),
+      mJpegClientHandle(0),
+      mJpegSessionId(0),
+      m_bThumbnailNeeded(TRUE),
+      m_pReprocChannel(NULL),
+      m_inputPPQ(releasePPInputData, this),
+      m_inputFWKPPQ(NULL, this),
+      m_ongoingPPQ(releaseOngoingPPData, this),
+      m_inputJpegQ(releaseJpegData, this),
+      m_ongoingJpegQ(releaseJpegData, this),
+      m_inputMetaQ(releaseMetadata, this),
+      m_jpegSettingsQ(NULL, this)
+{
+    memset(&mJpegHandle, 0, sizeof(mJpegHandle));
+    pthread_mutex_init(&mReprocJobLock, NULL);
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCamera3PostProcessor
+ *
+ * DESCRIPTION: destructor of QCamera3PostProcessor.
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCamera3PostProcessor::~QCamera3PostProcessor()
+{
+    pthread_mutex_destroy(&mReprocJobLock);
+}
+
+/*===========================================================================
+ * FUNCTION   : init
+ *
+ * DESCRIPTION: initialization of postprocessor
+ *
+ * PARAMETERS :
+ *   @memory              : output buffer memory
+ *   @postprocess_mask    : postprocess mask for the buffer
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3PostProcessor::init(QCamera3StreamMem *memory,
+        uint32_t postprocess_mask)
+{
+    ATRACE_CALL();
+    mOutputMem = memory;
+    mPostProcMask = postprocess_mask;
+    m_dataProcTh.launch(dataProcessRoutine, this);
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : deinit
+ *
+ * DESCRIPTION: de-initialization of postprocessor
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3PostProcessor::deinit()
+{
+    int rc = NO_ERROR;
+    m_dataProcTh.exit();
+
+    if (m_pReprocChannel != NULL) {
+        m_pReprocChannel->stop();
+        delete m_pReprocChannel;
+        m_pReprocChannel = NULL;
+    }
+
+    if(mJpegClientHandle > 0) {
+        rc = mJpegHandle.close(mJpegClientHandle);
+        CDBG_HIGH("%s: Jpeg closed, rc = %d, mJpegClientHandle = %x",
+              __func__, rc, mJpegClientHandle);
+        mJpegClientHandle = 0;
+        memset(&mJpegHandle, 0, sizeof(mJpegHandle));
+    }
+
+    mOutputMem = NULL;
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : initJpeg
+ *
+ * DESCRIPTION: initialization of jpeg through postprocessor
+ *
+ * PARAMETERS :
+ *   @jpeg_cb      : callback to handle jpeg event from mm-jpeg-interface
+ *   @max_pic_dim  : max picture dimensions
+ *   @user_data    : user data ptr for jpeg callback
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3PostProcessor::initJpeg(jpeg_encode_callback_t jpeg_cb,
+        cam_dimension_t* max_pic_dim,
+        void *user_data)
+{
+    ATRACE_CALL();
+    mJpegCB = jpeg_cb;
+    mJpegUserData = user_data;
+    mm_dimension max_size;
+
+    if ((0 > max_pic_dim->width) || (0 > max_pic_dim->height)) {
+        ALOGE("%s : Negative dimension %dx%d", __func__,
+                max_pic_dim->width, max_pic_dim->height);
+        return BAD_VALUE;
+    }
+
+    //set max pic size
+    memset(&max_size, 0, sizeof(mm_dimension));
+    max_size.w =  max_pic_dim->width;
+    max_size.h =  max_pic_dim->height;
+
+    mJpegClientHandle = jpeg_open(&mJpegHandle, max_size);
+    if(!mJpegClientHandle) {
+        ALOGE("%s : jpeg_open did not work", __func__);
+        return UNKNOWN_ERROR;
+    }
+    return NO_ERROR;
+}
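+
+// Note: the jpeg client handle opened here is closed again in deinit() via
+// mJpegHandle.close(), so initJpeg() is expected to be paired with deinit().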
+
+/*===========================================================================
+ * FUNCTION   : start
+ *
+ * DESCRIPTION: start postprocessor. The data process thread (launched in
+ *              init) is signaled to start handling jobs.
+ *
+ * PARAMETERS :
+ *   @config        : reprocess configuration
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ * NOTE       : if any reprocess is needed, a reprocess channel/stream
+ *              will be started.
+ *==========================================================================*/
+int32_t QCamera3PostProcessor::start(const reprocess_config_t &config)
+{
+    int32_t rc = NO_ERROR;
+    QCamera3HardwareInterface* hal_obj = (QCamera3HardwareInterface*)m_parent->mUserData;
+
+    if (config.reprocess_type != REPROCESS_TYPE_NONE) {
+        if (m_pReprocChannel != NULL) {
+            m_pReprocChannel->stop();
+            delete m_pReprocChannel;
+            m_pReprocChannel = NULL;
+        }
+
+        // if reprocess is needed, start reprocess channel
+        CDBG("%s: Setting input channel as pInputChannel", __func__);
+        m_pReprocChannel = hal_obj->addOfflineReprocChannel(config, m_parent);
+        if (m_pReprocChannel == NULL) {
+            ALOGE("%s: cannot add reprocess channel", __func__);
+            return UNKNOWN_ERROR;
+        }
+        /* Start the reprocess channel only if its buffers are already
+           allocated, which is the case for the JPEG reprocess type; defer
+           the start for other reprocess types. */
+        if (config.reprocess_type == REPROCESS_TYPE_JPEG) {
+            rc = m_pReprocChannel->start();
+            if (rc != 0) {
+                ALOGE("%s: cannot start reprocess channel", __func__);
+                delete m_pReprocChannel;
+                m_pReprocChannel = NULL;
+                return rc;
+            }
+        }
+    }
+    m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_START_DATA_PROC, TRUE, FALSE);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : stop
+ *
+ * DESCRIPTION: stop postprocessor. The data process thread is signaled to stop.
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ * NOTE       : reprocess channel will be stopped and deleted if there is any
+ *==========================================================================*/
+int32_t QCamera3PostProcessor::stop()
+{
+    m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_STOP_DATA_PROC, TRUE, TRUE);
+
+    if (m_pReprocChannel != NULL) {
+        m_pReprocChannel->stop();
+        delete m_pReprocChannel;
+        m_pReprocChannel = NULL;
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : getFWKJpegEncodeConfig
+ *
+ * DESCRIPTION: function to prepare encoding job information
+ *
+ * PARAMETERS :
+ *   @encode_parm   : param to be filled with encoding configuration
+ *   @frame         : framework input buffer
+ *   @jpeg_settings : jpeg settings to be applied for encoding
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3PostProcessor::getFWKJpegEncodeConfig(
+        mm_jpeg_encode_params_t& encode_parm,
+        qcamera_fwk_input_pp_data_t *frame,
+        jpeg_settings_t *jpeg_settings)
+{
+    CDBG("%s : E", __func__);
+    int32_t ret = NO_ERROR;
+
+    if ((NULL == frame) || (NULL == jpeg_settings)) {
+        return BAD_VALUE;
+    }
+
+    ssize_t bufSize = mOutputMem->getSize(jpeg_settings->out_buf_index);
+    if (BAD_INDEX == bufSize) {
+        ALOGE("%s: cannot retrieve buffer size for buffer %u", __func__,
+                jpeg_settings->out_buf_index);
+        return BAD_VALUE;
+    }
+
+    encode_parm.jpeg_cb = mJpegCB;
+    encode_parm.userdata = mJpegUserData;
+
+    if (jpeg_settings->thumbnail_size.width > 0 &&
+            jpeg_settings->thumbnail_size.height > 0)
+        m_bThumbnailNeeded = TRUE;
+    else
+        m_bThumbnailNeeded = FALSE;
+    encode_parm.encode_thumbnail = m_bThumbnailNeeded;
+
+    // get color format
+    cam_format_t img_fmt = frame->reproc_config.stream_format;
+    encode_parm.color_format = getColorfmtFromImgFmt(img_fmt);
+
+    // get jpeg quality
+    encode_parm.quality = jpeg_settings->jpeg_quality;
+    if (encode_parm.quality <= 0) {
+        encode_parm.quality = 85;
+    }
+
+    // get jpeg thumbnail quality
+    encode_parm.thumb_quality = jpeg_settings->jpeg_thumb_quality;
+
+    cam_frame_len_offset_t main_offset =
+            frame->reproc_config.input_stream_plane_info.plane_info;
+
+    encode_parm.num_src_bufs = 1;
+    encode_parm.src_main_buf[0].index = 0;
+    encode_parm.src_main_buf[0].buf_size = frame->input_buffer.frame_len;
+    encode_parm.src_main_buf[0].buf_vaddr = (uint8_t *) frame->input_buffer.buffer;
+    encode_parm.src_main_buf[0].fd = frame->input_buffer.fd;
+    encode_parm.src_main_buf[0].format = MM_JPEG_FMT_YUV;
+    encode_parm.src_main_buf[0].offset = main_offset;
+
+    //Pass input thumbnail buffer info to encoder.
+    //Note: Use main buffer to encode thumbnail
+    if (m_bThumbnailNeeded == TRUE) {
+        encode_parm.num_tmb_bufs = 1;
+        encode_parm.src_thumb_buf[0] = encode_parm.src_main_buf[0];
+    }
+
+    //Pass output jpeg buffer info to encoder.
+    //mOutputMem is allocated by framework.
+    encode_parm.num_dst_bufs = 1;
+    encode_parm.dest_buf[0].index = 0;
+    encode_parm.dest_buf[0].buf_size = (size_t)bufSize;
+    encode_parm.dest_buf[0].buf_vaddr = (uint8_t *)mOutputMem->getPtr(
+            jpeg_settings->out_buf_index);
+    encode_parm.dest_buf[0].fd = mOutputMem->getFd(
+            jpeg_settings->out_buf_index);
+    encode_parm.dest_buf[0].format = MM_JPEG_FMT_YUV;
+    encode_parm.dest_buf[0].offset = main_offset;
+
+    CDBG("%s : X", __func__);
+    return NO_ERROR;
+
+on_error:
+    CDBG("%s : X with error %d", __func__, ret);
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : getJpegEncodeConfig
+ *
+ * DESCRIPTION: function to prepare encoding job information
+ *
+ * PARAMETERS :
+ *   @encode_parm   : param to be filled with encoding configuration
+ *   @main_stream   : stream object where the input buffer comes from
+ *   @jpeg_settings : jpeg settings to be applied for encoding
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3PostProcessor::getJpegEncodeConfig(
+                mm_jpeg_encode_params_t& encode_parm,
+                QCamera3Stream *main_stream,
+                jpeg_settings_t *jpeg_settings)
+{
+    CDBG("%s : E", __func__);
+    int32_t ret = NO_ERROR;
+    ssize_t bufSize = 0;
+
+    encode_parm.jpeg_cb = mJpegCB;
+    encode_parm.userdata = mJpegUserData;
+
+    if (jpeg_settings->thumbnail_size.width > 0 &&
+            jpeg_settings->thumbnail_size.height > 0)
+        m_bThumbnailNeeded = TRUE;
+    else
+        m_bThumbnailNeeded = FALSE;
+    encode_parm.encode_thumbnail = m_bThumbnailNeeded;
+
+    // get color format
+    cam_format_t img_fmt = CAM_FORMAT_YUV_420_NV12;  //default value
+    main_stream->getFormat(img_fmt);
+    encode_parm.color_format = getColorfmtFromImgFmt(img_fmt);
+
+    // get jpeg quality
+    encode_parm.quality = jpeg_settings->jpeg_quality;
+    if (encode_parm.quality <= 0) {
+        encode_parm.quality = 85;
+    }
+
+    // get jpeg thumbnail quality
+    encode_parm.thumb_quality = jpeg_settings->jpeg_thumb_quality;
+
+    cam_frame_len_offset_t main_offset;
+    memset(&main_offset, 0, sizeof(cam_frame_len_offset_t));
+    main_stream->getFrameOffset(main_offset);
+
+    // src buf config
+    //Pass input main image buffer info to encoder.
+    QCamera3StreamMem *pStreamMem = main_stream->getStreamBufs();
+    if (pStreamMem == NULL) {
+        ALOGE("%s: cannot get stream bufs from main stream", __func__);
+        ret = BAD_VALUE;
+        goto on_error;
+    }
+    encode_parm.num_src_bufs = MIN(pStreamMem->getCnt(), MM_JPEG_MAX_BUF);
+    for (uint32_t i = 0; i < encode_parm.num_src_bufs; i++) {
+        if (pStreamMem != NULL) {
+            encode_parm.src_main_buf[i].index = i;
+            bufSize = pStreamMem->getSize(i);
+            if (BAD_INDEX == bufSize) {
+                ALOGE("%s: cannot retrieve buffer size for buffer %u", __func__, i);
+                ret = BAD_VALUE;
+                goto on_error;
+            }
+            encode_parm.src_main_buf[i].buf_size = (size_t)bufSize;
+            encode_parm.src_main_buf[i].buf_vaddr = (uint8_t *)pStreamMem->getPtr(i);
+            encode_parm.src_main_buf[i].fd = pStreamMem->getFd(i);
+            encode_parm.src_main_buf[i].format = MM_JPEG_FMT_YUV;
+            encode_parm.src_main_buf[i].offset = main_offset;
+        }
+    }
+
+    //Pass input thumbnail buffer info to encoder.
+    //Note: Use main buffer to encode thumbnail
+    if (m_bThumbnailNeeded == TRUE) {
+        pStreamMem = main_stream->getStreamBufs();
+        if (pStreamMem == NULL) {
+            ALOGE("%s: cannot get stream bufs from thumb stream", __func__);
+            ret = BAD_VALUE;
+            goto on_error;
+        }
+        cam_frame_len_offset_t thumb_offset;
+        memset(&thumb_offset, 0, sizeof(cam_frame_len_offset_t));
+        main_stream->getFrameOffset(thumb_offset);
+        encode_parm.num_tmb_bufs = MIN(pStreamMem->getCnt(), MM_JPEG_MAX_BUF);
+        for (uint32_t i = 0; i < encode_parm.num_tmb_bufs; i++) {
+            if (pStreamMem != NULL) {
+                encode_parm.src_thumb_buf[i].index = i;
+                bufSize = pStreamMem->getSize(i);
+                if (BAD_INDEX == bufSize) {
+                    ALOGE("%s: cannot retrieve buffer size for buffer %u", __func__, i);
+                    ret = BAD_VALUE;
+                    goto on_error;
+                }
+                encode_parm.src_thumb_buf[i].buf_size = (uint32_t)bufSize;
+                encode_parm.src_thumb_buf[i].buf_vaddr = (uint8_t *)pStreamMem->getPtr(i);
+                encode_parm.src_thumb_buf[i].fd = pStreamMem->getFd(i);
+                encode_parm.src_thumb_buf[i].format = MM_JPEG_FMT_YUV;
+                encode_parm.src_thumb_buf[i].offset = thumb_offset;
+            }
+        }
+    }
+
+    //Pass output jpeg buffer info to encoder.
+    //mOutputMem is allocated by the framework.
+    bufSize = mOutputMem->getSize(jpeg_settings->out_buf_index);
+    if (BAD_INDEX == bufSize) {
+        ALOGE("%s: cannot retrieve buffer size for buffer %u", __func__,
+                jpeg_settings->out_buf_index);
+        ret = BAD_VALUE;
+        goto on_error;
+    }
+    encode_parm.num_dst_bufs = 1;
+    encode_parm.dest_buf[0].index = 0;
+    encode_parm.dest_buf[0].buf_size = (size_t)bufSize;
+    encode_parm.dest_buf[0].buf_vaddr = (uint8_t *)mOutputMem->getPtr(
+            jpeg_settings->out_buf_index);
+    encode_parm.dest_buf[0].fd = mOutputMem->getFd(
+            jpeg_settings->out_buf_index);
+    encode_parm.dest_buf[0].format = MM_JPEG_FMT_YUV;
+    encode_parm.dest_buf[0].offset = main_offset;
+
+    CDBG("%s : X", __func__);
+    return NO_ERROR;
+
+on_error:
+    CDBG("%s : X with error %d", __func__, ret);
+    return ret;
+}
+
+int32_t QCamera3PostProcessor::processData(mm_camera_super_buf_t *input) {
+    return processData(input, NULL, 0);
+}
+
+/*===========================================================================
+ * FUNCTION   : processData
+ *
+ * DESCRIPTION: enqueue data into dataProc thread
+ *
+ * PARAMETERS :
+ *   @input       : input frame super buffer
+ *   @output      : output buffer handle (may be NULL)
+ *   @frameNumber : frame number of the capture request
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ * NOTE       : depending on whether offline reprocess is needed, the received
+ *              frame is sent either to the postprocess input queue or to jpeg
+ *              encoding
+ *==========================================================================*/
+int32_t QCamera3PostProcessor::processData(mm_camera_super_buf_t *input,
+        buffer_handle_t *output, uint32_t frameNumber)
+{
+    CDBG("%s: E", __func__);
+    QCamera3HardwareInterface* hal_obj = (QCamera3HardwareInterface*)m_parent->mUserData;
+    pthread_mutex_lock(&mReprocJobLock);
+
+    // enqueue to post proc input queue
+    qcamera_hal3_pp_buffer_t *pp_buffer = (qcamera_hal3_pp_buffer_t *)malloc(
+            sizeof(qcamera_hal3_pp_buffer_t));
+    if (NULL == pp_buffer) {
+        ALOGE("%s: out of memory", __func__);
+        pthread_mutex_unlock(&mReprocJobLock);
+        return NO_MEMORY;
+    }
+    memset(pp_buffer, 0, sizeof(*pp_buffer));
+    pp_buffer->input = input;
+    pp_buffer->output = output;
+    pp_buffer->frameNumber = frameNumber;
+    m_inputPPQ.enqueue((void *)pp_buffer);
+    if (!(m_inputMetaQ.isEmpty())) {
+        CDBG("%s: meta queue is not empty, do next job", __func__);
+        m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE);
+    } else
+        CDBG("%s: metadata queue is empty", __func__);
+    pthread_mutex_unlock(&mReprocJobLock);
+
+    return NO_ERROR;
+}
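+
+// Note: a pp input buffer only advances once its matching metadata has also
+// arrived through processPPMetadata(); whichever of the two is enqueued
+// second issues CAMERA_CMD_TYPE_DO_NEXT_JOB, which is why both paths check
+// the other queue under mReprocJobLock before sending the command.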
+
+/*===========================================================================
+ * FUNCTION   : processData
+ *
+ * DESCRIPTION: enqueue data into dataProc thread
+ *
+ * PARAMETERS :
+ *   @frame   : process frame
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ * NOTE       : depending on whether offline reprocess is needed, the received
+ *              frame is sent either to the postprocess input queue or to jpeg
+ *              encoding
+ *==========================================================================*/
+int32_t QCamera3PostProcessor::processData(qcamera_fwk_input_pp_data_t *frame)
+{
+    QCamera3HardwareInterface* hal_obj = (QCamera3HardwareInterface*)m_parent->mUserData;
+    if (frame->reproc_config.reprocess_type != REPROCESS_TYPE_NONE) {
+        pthread_mutex_lock(&mReprocJobLock);
+        // enqueue to post proc input queue
+        m_inputFWKPPQ.enqueue((void *)frame);
+        m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE);
+        pthread_mutex_unlock(&mReprocJobLock);
+    } else {
+        jpeg_settings_t *jpeg_settings = (jpeg_settings_t *)m_jpegSettingsQ.dequeue();
+
+        if (jpeg_settings == NULL) {
+            ALOGE("%s: Cannot find jpeg settings", __func__);
+            return BAD_VALUE;
+        }
+
+        CDBG_HIGH("%s: no need offline reprocess, sending to jpeg encoding", __func__);
+        qcamera_hal3_jpeg_data_t *jpeg_job =
+            (qcamera_hal3_jpeg_data_t *)malloc(sizeof(qcamera_hal3_jpeg_data_t));
+        if (jpeg_job == NULL) {
+            ALOGE("%s: No memory for jpeg job", __func__);
+            return NO_MEMORY;
+        }
+
+        memset(jpeg_job, 0, sizeof(qcamera_hal3_jpeg_data_t));
+        jpeg_job->fwk_frame = frame;
+        jpeg_job->jpeg_settings = jpeg_settings;
+        jpeg_job->metadata =
+                (metadata_buffer_t *) frame->metadata_buffer.buffer;
+
+        // enqueue to jpeg input queue
+        m_inputJpegQ.enqueue((void *)jpeg_job);
+        m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE);
+    }
+
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : processPPMetadata
+ *
+ * DESCRIPTION: enqueue data into dataProc thread
+ *
+ * PARAMETERS :
+ *   @reproc_meta : metadata super buffer received from pic channel
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ *==========================================================================*/
+int32_t QCamera3PostProcessor::processPPMetadata(mm_camera_super_buf_t *reproc_meta)
+{
+    CDBG("%s: E", __func__);
+    pthread_mutex_lock(&mReprocJobLock);
+    // enqueue to metadata input queue
+    m_inputMetaQ.enqueue((void *)reproc_meta);
+    if (!(m_inputPPQ.isEmpty())) {
+       CDBG("%s: pp queue is not empty, do next job", __func__);
+       m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE);
+    } else {
+       CDBG("%s: pp queue is empty, not calling do next job", __func__);
+    }
+    pthread_mutex_unlock(&mReprocJobLock);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : processJpegSettingData
+ *
+ * DESCRIPTION: enqueue jpegSetting into dataProc thread
+ *
+ * PARAMETERS :
+ *   @jpeg_settings : jpeg settings data received from pic channel
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ *==========================================================================*/
+int32_t QCamera3PostProcessor::processJpegSettingData(
+        jpeg_settings_t *jpeg_settings)
+{
+    if (!jpeg_settings) {
+        ALOGE("%s: invalid jpeg settings pointer", __func__);
+        return -EINVAL;
+    }
+    return m_jpegSettingsQ.enqueue((void *)jpeg_settings);
+}
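+
+// Note: one jpeg_settings_t is expected per capture request; it is consumed
+// either directly in processData() for the no-reprocess path, or in
+// dataProcessRoutine() when the corresponding pp job is created.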
+
+/*===========================================================================
+ * FUNCTION   : processPPData
+ *
+ * DESCRIPTION: process received frame after reprocess.
+ *
+ * PARAMETERS :
+ *   @frame   : received frame from reprocess channel.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ * NOTE       : The frame after reprocess needs to be sent to jpeg encoding.
+ *==========================================================================*/
+int32_t QCamera3PostProcessor::processPPData(mm_camera_super_buf_t *frame)
+{
+    qcamera_hal3_pp_data_t *job = (qcamera_hal3_pp_data_t *)m_ongoingPPQ.dequeue();
+
+    if (job == NULL || ((NULL == job->src_frame) && (NULL == job->fwk_src_frame))) {
+        ALOGE("%s: Cannot find reprocess job", __func__);
+        return BAD_VALUE;
+    }
+    if (job->jpeg_settings == NULL) {
+        ALOGE("%s: Cannot find jpeg settings", __func__);
+        return BAD_VALUE;
+    }
+
+    qcamera_hal3_jpeg_data_t *jpeg_job =
+        (qcamera_hal3_jpeg_data_t *)malloc(sizeof(qcamera_hal3_jpeg_data_t));
+    if (jpeg_job == NULL) {
+        ALOGE("%s: No memory for jpeg job", __func__);
+        return NO_MEMORY;
+    }
+
+    memset(jpeg_job, 0, sizeof(qcamera_hal3_jpeg_data_t));
+    jpeg_job->src_frame = frame;
+    if(frame != job->src_frame)
+        jpeg_job->src_reproc_frame = job->src_frame;
+    if (NULL == job->fwk_src_frame) {
+        jpeg_job->metadata = job->metadata;
+    } else {
+        jpeg_job->metadata =
+                (metadata_buffer_t *) job->fwk_src_frame->metadata_buffer.buffer;
+        jpeg_job->fwk_src_buffer = job->fwk_src_frame;
+    }
+    jpeg_job->src_metadata = job->src_metadata;
+    jpeg_job->jpeg_settings = job->jpeg_settings;
+
+    // free pp job buf
+    free(job);
+
+    // enqueue reprocessed frame to jpeg input queue
+    m_inputJpegQ.enqueue((void *)jpeg_job);
+
+    // wake up data proc thread
+    m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE);
+
+    return NO_ERROR;
+}
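+
+// Note: only the pp job container is freed above; the resources it referenced
+// (source frames, metadata, jpeg settings) are handed over to the jpeg job
+// and released later by releaseJpegJobData() once encoding completes or is
+// aborted.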
+
+/*===========================================================================
+ * FUNCTION   : dequeuePPJob
+ *
+ * DESCRIPTION: find a postprocessing job from ongoing pp queue by frame number
+ *
+ * PARAMETERS :
+ *   @frameNumber : frame number for the pp job
+ *
+ * RETURN     : ptr to a pp job struct. NULL if not found.
+ *==========================================================================*/
+qcamera_hal3_pp_data_t *QCamera3PostProcessor::dequeuePPJob(uint32_t frameNumber) {
+    qcamera_hal3_pp_data_t *pp_job = NULL;
+    pp_job = (qcamera_hal3_pp_data_t *)m_ongoingPPQ.dequeue();
+
+    if (pp_job == NULL) {
+        ALOGE("%s: Fatal: ongoing PP queue is empty", __func__);
+        return NULL;
+    }
+    if (pp_job->fwk_src_frame &&
+            (pp_job->fwk_src_frame->frameNumber != frameNumber)) {
+        ALOGE("%s: head of pp queue doesn't match requested frame number", __func__);
+    }
+    return pp_job;
+}
+
+/*===========================================================================
+ * FUNCTION   : findJpegJobByJobId
+ *
+ * DESCRIPTION: find a jpeg job from ongoing Jpeg queue by its job ID
+ *
+ * PARAMETERS :
+ *   @jobId   : job Id of the job
+ *
+ * RETURN     : ptr to a jpeg job struct. NULL if not found.
+ *
+ * NOTE       : Currently only one job is sent to mm-jpeg-interface for jpeg
+ *              encoding at a time. Therefore simply dequeuing the head of the
+ *              ongoing Jpeg Queue is enough to find the jpeg job.
+ *==========================================================================*/
+qcamera_hal3_jpeg_data_t *QCamera3PostProcessor::findJpegJobByJobId(uint32_t jobId)
+{
+    qcamera_hal3_jpeg_data_t * job = NULL;
+    if (jobId == 0) {
+        ALOGE("%s: not a valid jpeg jobId", __func__);
+        return NULL;
+    }
+
+    // currently only one jpeg job is ongoing, so simply dequeue the head
+    job = (qcamera_hal3_jpeg_data_t *)m_ongoingJpegQ.dequeue();
+    return job;
+}
+
+/*===========================================================================
+ * FUNCTION   : releasePPInputData
+ *
+ * DESCRIPTION: callback function to release post process input data node
+ *
+ * PARAMETERS :
+ *   @data      : ptr to post process input data
+ *   @user_data : user data ptr (QCamera3PostProcessor)
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCamera3PostProcessor::releasePPInputData(void *data, void *user_data)
+{
+    QCamera3PostProcessor *pme = (QCamera3PostProcessor *)user_data;
+    if (NULL != pme) {
+        qcamera_hal3_pp_buffer_t *buf = (qcamera_hal3_pp_buffer_t *)data;
+        if (NULL != buf) {
+            if (buf->input) {
+                pme->releaseSuperBuf(buf->input);
+                free(buf->input);
+                buf->input = NULL;
+            }
+        }
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseMetadata
+ *
+ * DESCRIPTION: callback function to release metadata camera buffer
+ *
+ * PARAMETERS :
+ *   @data      : ptr to metadata super buffer
+ *   @user_data : user data ptr (QCamera3PostProcessor)
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCamera3PostProcessor::releaseMetadata(void *data, void *user_data)
+{
+    QCamera3PostProcessor *pme = (QCamera3PostProcessor *)user_data;
+    if (NULL != pme) {
+        pme->m_parent->metadataBufDone((mm_camera_super_buf_t *)data);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseJpegData
+ *
+ * DESCRIPTION: callback function to release jpeg job node
+ *
+ * PARAMETERS :
+ *   @data      : ptr to ongoing jpeg job data
+ *   @user_data : user data ptr (QCamera3PostProcessor)
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCamera3PostProcessor::releaseJpegData(void *data, void *user_data)
+{
+    QCamera3PostProcessor *pme = (QCamera3PostProcessor *)user_data;
+    if (NULL != pme) {
+        pme->releaseJpegJobData((qcamera_hal3_jpeg_data_t *)data);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseOngoingPPData
+ *
+ * DESCRIPTION: callback function to release ongoing postprocess job node
+ *
+ * PARAMETERS :
+ *   @data      : ptr to ongoing postprocess job
+ *   @user_data : user data ptr (QCamera3PostProcessor)
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCamera3PostProcessor::releaseOngoingPPData(void *data, void *user_data)
+{
+    QCamera3PostProcessor *pme = (QCamera3PostProcessor *)user_data;
+    if (NULL != pme) {
+        qcamera_hal3_pp_data_t *pp_data = (qcamera_hal3_pp_data_t *)data;
+
+        if (pp_data && pp_data->src_frame)
+          pme->releaseSuperBuf(pp_data->src_frame);
+
+        pme->releasePPJobData(pp_data);
+
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseSuperBuf
+ *
+ * DESCRIPTION: function to release a superbuf frame by returning it to the kernel
+ *
+ * PARAMETERS :
+ *   @super_buf : ptr to the superbuf frame
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCamera3PostProcessor::releaseSuperBuf(mm_camera_super_buf_t *super_buf)
+{
+    if (NULL != super_buf) {
+        if (m_parent != NULL) {
+            m_parent->bufDone(super_buf);
+        }
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseOfflineBuffers
+ *
+ * DESCRIPTION: function to release/unmap offline buffers if any
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3PostProcessor::releaseOfflineBuffers()
+{
+    int32_t rc = NO_ERROR;
+
+    if(NULL != m_pReprocChannel) {
+        rc = m_pReprocChannel->unmapOfflineBuffers(false);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseJpegJobData
+ *
+ * DESCRIPTION: function to release internal resources in jpeg job struct
+ *
+ * PARAMETERS :
+ *   @job     : ptr to jpeg job struct
+ *
+ * RETURN     : None
+ *
+ * NOTE       : the original source frame needs to be queued back to the kernel
+ *              for future use. The output buf of the jpeg job needs to be
+ *              released since it is allocated per job. The Exif object needs
+ *              to be deleted.
+ *==========================================================================*/
+void QCamera3PostProcessor::releaseJpegJobData(qcamera_hal3_jpeg_data_t *job)
+{
+    ATRACE_CALL();
+    int32_t rc = NO_ERROR;
+    CDBG("%s: E", __func__);
+    if (NULL != job) {
+        if (NULL != job->src_reproc_frame) {
+            free(job->src_reproc_frame);
+            job->src_reproc_frame = NULL;
+        }
+
+        if (NULL != job->src_frame) {
+            if (NULL != m_pReprocChannel) {
+                rc = m_pReprocChannel->bufDone(job->src_frame);
+                if (NO_ERROR != rc)
+                    ALOGE("%s: bufDone error: %d", __func__, rc);
+            }
+            free(job->src_frame);
+            job->src_frame = NULL;
+        }
+
+        if (NULL != job->fwk_src_buffer) {
+            free(job->fwk_src_buffer);
+            job->fwk_src_buffer = NULL;
+        } else if (NULL != job->src_metadata) {
+            m_parent->metadataBufDone(job->src_metadata);
+            free(job->src_metadata);
+            job->src_metadata = NULL;
+        }
+
+        if (NULL != job->fwk_frame) {
+            free(job->fwk_frame);
+            job->fwk_frame = NULL;
+        }
+
+        if (NULL != job->pJpegExifObj) {
+            delete job->pJpegExifObj;
+            job->pJpegExifObj = NULL;
+        }
+
+        if (NULL != job->jpeg_settings) {
+            free(job->jpeg_settings);
+            job->jpeg_settings = NULL;
+        }
+    }
+    /* Additional trigger to process any pending jobs in the input queue */
+    m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE);
+    CDBG("%s: X", __func__);
+}
+
+/*===========================================================================
+ * FUNCTION   : releasePPJobData
+ *
+ * DESCRIPTION: function to release internal resources in pp job struct
+ *
+ * PARAMETERS :
+ *   @job     : ptr to pp job struct
+ *
+ * RETURN     : None
+ *
+ * NOTE       : Original source metadata buffer needs to be released and
+ *              queued back to kernel for future use. src_frame, src_metadata,
+ *              and fwk_src_frame structures need to be freed.
+ *==========================================================================*/
+void QCamera3PostProcessor::releasePPJobData(qcamera_hal3_pp_data_t *pp_job)
+{
+    ATRACE_CALL();
+    CDBG("%s: E", __func__);
+    if (NULL != pp_job) {
+        if (NULL != pp_job->src_frame) {
+            free(pp_job->src_frame);
+            if (NULL != pp_job->src_metadata) {
+                m_parent->metadataBufDone(pp_job->src_metadata);
+                free(pp_job->src_metadata);
+            }
+            pp_job->src_frame = NULL;
+            pp_job->metadata = NULL;
+        }
+
+        if (NULL != pp_job->fwk_src_frame) {
+            free(pp_job->fwk_src_frame);
+            pp_job->fwk_src_frame = NULL;
+        }
+    }
+
+    /* Additional trigger to process any pending jobs in the input queue */
+    m_dataProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE);
+    CDBG("%s: X", __func__);
+}
+
+/*===========================================================================
+ * FUNCTION   : getColorfmtFromImgFmt
+ *
+ * DESCRIPTION: function to return jpeg color format based on its image format
+ *
+ * PARAMETERS :
+ *   @img_fmt : image format
+ *
+ * RETURN     : jpeg color format that can be understood by the omx lib
+ *==========================================================================*/
+mm_jpeg_color_format QCamera3PostProcessor::getColorfmtFromImgFmt(cam_format_t img_fmt)
+{
+    switch (img_fmt) {
+    case CAM_FORMAT_YUV_420_NV21:
+        return MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V2;
+    case CAM_FORMAT_YUV_420_NV21_ADRENO:
+        return MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V2;
+    case CAM_FORMAT_YUV_420_NV12:
+        return MM_JPEG_COLOR_FORMAT_YCBCRLP_H2V2;
+    case CAM_FORMAT_YUV_420_YV12:
+        return MM_JPEG_COLOR_FORMAT_YCBCRLP_H2V2;
+    case CAM_FORMAT_YUV_422_NV61:
+        return MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V1;
+    case CAM_FORMAT_YUV_422_NV16:
+        return MM_JPEG_COLOR_FORMAT_YCBCRLP_H2V1;
+    default:
+        return MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V2;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getJpegImgTypeFromImgFmt
+ *
+ * DESCRIPTION: function to return jpeg encode image type based on its image format
+ *
+ * PARAMETERS :
+ *   @img_fmt : image format
+ *
+ * RETURN     : return jpeg source image format (YUV or Bitstream)
+ *==========================================================================*/
+mm_jpeg_format_t QCamera3PostProcessor::getJpegImgTypeFromImgFmt(cam_format_t img_fmt)
+{
+    switch (img_fmt) {
+    case CAM_FORMAT_YUV_420_NV21:
+    case CAM_FORMAT_YUV_420_NV21_ADRENO:
+    case CAM_FORMAT_YUV_420_NV12:
+    case CAM_FORMAT_YUV_420_YV12:
+    case CAM_FORMAT_YUV_422_NV61:
+    case CAM_FORMAT_YUV_422_NV16:
+        return MM_JPEG_FMT_YUV;
+    default:
+        return MM_JPEG_FMT_YUV;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : encodeFWKData
+ *
+ * DESCRIPTION: function to prepare encoding job information and send to
+ *              mm-jpeg-interface to do the encoding job
+ *
+ * PARAMETERS :
+ *   @jpeg_job_data : ptr to a struct saving job related information
+ *   @needNewSess   : flag to indicate if a new jpeg encoding session needs
+ *                    to be created; it is cleared once the session is created
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3PostProcessor::encodeFWKData(qcamera_hal3_jpeg_data_t *jpeg_job_data,
+        uint8_t &needNewSess)
+{
+    CDBG("%s : E", __func__);
+    int32_t ret = NO_ERROR;
+    mm_jpeg_job_t jpg_job;
+    uint32_t jobId = 0;
+    qcamera_fwk_input_pp_data_t *recvd_frame = NULL;
+    metadata_buffer_t *metadata = NULL;
+    jpeg_settings_t *jpeg_settings = NULL;
+    QCamera3HardwareInterface* hal_obj = NULL;
+    bool needJpegRotation = false;
+
+    if (NULL == jpeg_job_data) {
+        ALOGE("%s: Invalid jpeg job", __func__);
+        return BAD_VALUE;
+    }
+
+    recvd_frame = jpeg_job_data->fwk_frame;
+    if (NULL == recvd_frame) {
+        ALOGE("%s: Invalid input buffer", __func__);
+        return BAD_VALUE;
+    }
+
+    metadata = jpeg_job_data->metadata;
+    if (NULL == metadata) {
+        ALOGE("%s: Invalid metadata buffer", __func__);
+        return BAD_VALUE;
+    }
+
+    jpeg_settings = jpeg_job_data->jpeg_settings;
+    if (NULL == jpeg_settings) {
+        ALOGE("%s: Invalid jpeg settings buffer", __func__);
+        return BAD_VALUE;
+    }
+
+    if ((NULL != jpeg_job_data->src_frame) && (NULL != jpeg_job_data->fwk_frame)) {
+        ALOGE("%s: Unsupported case: both framework and camera source buffers are set!",
+                __func__);
+        return BAD_VALUE;
+    }
+
+    hal_obj = (QCamera3HardwareInterface*)m_parent->mUserData;
+
+    if (mJpegClientHandle <= 0) {
+        ALOGE("%s: Error: bug here, mJpegClientHandle is 0", __func__);
+        return UNKNOWN_ERROR;
+    }
+
+    cam_dimension_t src_dim;
+    memset(&src_dim, 0, sizeof(cam_dimension_t));
+    src_dim.width = recvd_frame->reproc_config.input_stream_dim.width;
+    src_dim.height = recvd_frame->reproc_config.input_stream_dim.height;
+
+    cam_dimension_t dst_dim;
+    memset(&dst_dim, 0, sizeof(cam_dimension_t));
+    dst_dim.width = recvd_frame->reproc_config.output_stream_dim.width;
+    dst_dim.height = recvd_frame->reproc_config.output_stream_dim.height;
+
+    CDBG_HIGH("%s: Need new session?:%d",__func__, needNewSess);
+    if (needNewSess) {
+        //creating a new session, so we must destroy the old one
+        if ( 0 < mJpegSessionId ) {
+            ret = mJpegHandle.destroy_session(mJpegSessionId);
+            if (ret != NO_ERROR) {
+                ALOGE("%s: Error destroying an old jpeg encoding session, id = %d",
+                      __func__, mJpegSessionId);
+                return ret;
+            }
+            mJpegSessionId = 0;
+        }
+        // create jpeg encoding session
+        mm_jpeg_encode_params_t encodeParam;
+        memset(&encodeParam, 0, sizeof(mm_jpeg_encode_params_t));
+        encodeParam.main_dim.src_dim = src_dim;
+        encodeParam.main_dim.dst_dim = dst_dim;
+        encodeParam.thumb_dim.src_dim = src_dim;
+        encodeParam.thumb_dim.dst_dim = jpeg_settings->thumbnail_size;
+
+        getFWKJpegEncodeConfig(encodeParam, recvd_frame, jpeg_settings);
+        CDBG_HIGH("%s: #src bufs:%d # tmb bufs:%d #dst_bufs:%d", __func__,
+                     encodeParam.num_src_bufs,encodeParam.num_tmb_bufs,encodeParam.num_dst_bufs);
+
+        ret = mJpegHandle.create_session(mJpegClientHandle, &encodeParam, &mJpegSessionId);
+        if (ret != NO_ERROR) {
+            ALOGE("%s: Error creating a new jpeg encoding session, ret = %d", __func__, ret);
+            return ret;
+        }
+        needNewSess = FALSE;
+    }
+
+    // Fill in new job
+    memset(&jpg_job, 0, sizeof(mm_jpeg_job_t));
+    jpg_job.job_type = JPEG_JOB_TYPE_ENCODE;
+    jpg_job.encode_job.session_id = mJpegSessionId;
+    jpg_job.encode_job.src_index = 0;
+    jpg_job.encode_job.dst_index = 0;
+
+    cam_rect_t crop;
+    memset(&crop, 0, sizeof(cam_rect_t));
+    //TBD_later - Zoom event removed in stream
+    //main_stream->getCropInfo(crop);
+
+    // Set main dim job parameters and handle rotation
+    needJpegRotation = hal_obj->needJpegRotation();
+    if (!needJpegRotation && (jpeg_settings->jpeg_orientation == 90 ||
+            jpeg_settings->jpeg_orientation == 270)) {
+
+        jpg_job.encode_job.main_dim.src_dim.width = src_dim.height;
+        jpg_job.encode_job.main_dim.src_dim.height = src_dim.width;
+
+        jpg_job.encode_job.main_dim.dst_dim.width = dst_dim.height;
+        jpg_job.encode_job.main_dim.dst_dim.height = dst_dim.width;
+
+        jpg_job.encode_job.main_dim.crop.width = crop.height;
+        jpg_job.encode_job.main_dim.crop.height = crop.width;
+        jpg_job.encode_job.main_dim.crop.left = crop.top;
+        jpg_job.encode_job.main_dim.crop.top = crop.left;
+    } else {
+        jpg_job.encode_job.main_dim.src_dim = src_dim;
+        jpg_job.encode_job.main_dim.dst_dim = dst_dim;
+        jpg_job.encode_job.main_dim.crop = crop;
+    }
+
+    QCamera3HardwareInterface* obj = (QCamera3HardwareInterface*)m_parent->mUserData;
+    // get 3a sw version info
+    cam_q3a_version_t sw_version;
+    memset(&sw_version, 0, sizeof(sw_version));
+    if (obj)
+        obj->get3AVersion(sw_version);
+
+    // get exif data
+    QCamera3Exif *pJpegExifObj = getExifData(metadata, jpeg_settings);
+    jpeg_job_data->pJpegExifObj = pJpegExifObj;
+    if (pJpegExifObj != NULL) {
+        jpg_job.encode_job.exif_info.exif_data = pJpegExifObj->getEntries();
+        jpg_job.encode_job.exif_info.numOfEntries =
+            pJpegExifObj->getNumOfEntries();
+        jpg_job.encode_job.exif_info.debug_data.sw_3a_version[0] =
+            sw_version.major_version;
+        jpg_job.encode_job.exif_info.debug_data.sw_3a_version[1] =
+            sw_version.minor_version;
+        jpg_job.encode_job.exif_info.debug_data.sw_3a_version[2] =
+            sw_version.patch_version;
+        jpg_job.encode_job.exif_info.debug_data.sw_3a_version[3] =
+            sw_version.new_feature_des;
+    }
+
+    // thumbnail dim
+    CDBG_HIGH("%s: Thumbnail needed:%d",__func__, m_bThumbnailNeeded);
+    if (m_bThumbnailNeeded == TRUE) {
+        memset(&crop, 0, sizeof(cam_rect_t));
+        jpg_job.encode_job.thumb_dim.dst_dim =
+                jpeg_settings->thumbnail_size;
+
+        if (needJpegRotation) {
+            jpg_job.encode_job.rotation = (uint32_t)jpeg_settings->jpeg_orientation;
+            CDBG_HIGH("%s: jpeg rotation is set to %u", __func__, jpg_job.encode_job.rotation);
+        } else if (jpeg_settings->jpeg_orientation  == 90 ||
+                jpeg_settings->jpeg_orientation == 270) {
+            //swap the thumbnail destination width and height if it has
+            //already been rotated
+            int temp = jpg_job.encode_job.thumb_dim.dst_dim.width;
+            jpg_job.encode_job.thumb_dim.dst_dim.width =
+                    jpg_job.encode_job.thumb_dim.dst_dim.height;
+            jpg_job.encode_job.thumb_dim.dst_dim.height = temp;
+        }
+        jpg_job.encode_job.thumb_dim.src_dim = src_dim;
+        jpg_job.encode_job.thumb_dim.crop = crop;
+        jpg_job.encode_job.thumb_index = 0;
+    }
+
+    if (metadata != NULL) {
+       //Fill in the metadata passed as parameter
+       jpg_job.encode_job.p_metadata = metadata;
+    } else {
+       ALOGE("%s: Metadata is null", __func__);
+    }
+
+    jpg_job.encode_job.hal_version = CAM_HAL_V3;
+
+    //Start jpeg encoding
+    ret = mJpegHandle.start_job(&jpg_job, &jobId);
+    if (ret == NO_ERROR) {
+        // remember job info
+        jpeg_job_data->jobId = jobId;
+    }
+
+    CDBG("%s : X", __func__);
+    return ret;
+}
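+
+// Note: unlike encodeData() below, this framework-buffer path does not flush
+// CPU caches on the source buffer and does not copy the 3A/mobicat debug
+// parameters into p_metadata, presumably because the input buffer is provided
+// by the framework rather than owned by a camera stream.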
+
+/*===========================================================================
+ * FUNCTION   : encodeData
+ *
+ * DESCRIPTION: function to prepare encoding job information and send to
+ *              mm-jpeg-interface to do the encoding job
+ *
+ * PARAMETERS :
+ *   @jpeg_job_data : ptr to a struct saving job related information
+ *   @needNewSess   : flag to indicate if a new jpeg encoding session needs
+ *                    to be created; it is cleared once the session is created
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3PostProcessor::encodeData(qcamera_hal3_jpeg_data_t *jpeg_job_data,
+                          uint8_t &needNewSess)
+{
+    ATRACE_CALL();
+    CDBG("%s : E", __func__);
+    int32_t ret = NO_ERROR;
+    mm_jpeg_job_t jpg_job;
+    uint32_t jobId = 0;
+    QCamera3Stream *main_stream = NULL;
+    mm_camera_buf_def_t *main_frame = NULL;
+    QCamera3Channel *srcChannel = NULL;
+    mm_camera_super_buf_t *recvd_frame = NULL;
+    metadata_buffer_t *metadata = NULL;
+    jpeg_settings_t *jpeg_settings = NULL;
+    QCamera3HardwareInterface* hal_obj = NULL;
+    if (m_parent != NULL) {
+       hal_obj = (QCamera3HardwareInterface*)m_parent->mUserData;
+    } else {
+       ALOGE("%s: m_parent is NULL, Error",__func__);
+       return BAD_VALUE;
+    }
+    bool needJpegRotation = false;
+
+    recvd_frame = jpeg_job_data->src_frame;
+    metadata = jpeg_job_data->metadata;
+    jpeg_settings = jpeg_job_data->jpeg_settings;
+
+    CDBG("%s: encoding bufIndex: %u", __func__,
+        jpeg_job_data->src_frame->bufs[0]->buf_idx);
+
+    QCamera3Channel *pChannel = NULL;
+    // first check picture channel
+    if (m_parent->getMyHandle() == recvd_frame->ch_id) {
+        pChannel = m_parent;
+    }
+    // check reprocess channel if not found
+    if (pChannel == NULL) {
+        if (m_pReprocChannel != NULL &&
+            m_pReprocChannel->getMyHandle() == recvd_frame->ch_id) {
+            pChannel = m_pReprocChannel;
+        }
+    }
+
+    srcChannel = pChannel;
+
+    if (srcChannel == NULL) {
+        ALOGE("%s: No corresponding channel (ch_id = %d) exist, return here",
+              __func__, recvd_frame->ch_id);
+        return BAD_VALUE;
+    }
+
+    // find snapshot frame and thumbnail frame
+    //Note: In this version we will receive only snapshot frame.
+    for (uint32_t i = 0; i < recvd_frame->num_bufs; i++) {
+        QCamera3Stream *srcStream =
+            srcChannel->getStreamByHandle(recvd_frame->bufs[i]->stream_id);
+        if (srcStream != NULL) {
+            switch (srcStream->getMyType()) {
+            case CAM_STREAM_TYPE_SNAPSHOT:
+            case CAM_STREAM_TYPE_OFFLINE_PROC:
+                main_stream = srcStream;
+                main_frame = recvd_frame->bufs[i];
+                break;
+            default:
+                break;
+            }
+        }
+    }
+
+    if(NULL == main_frame){
+       ALOGE("%s : Main frame is NULL", __func__);
+       return BAD_VALUE;
+    }
+
+    QCamera3StreamMem *memObj = (QCamera3StreamMem *)main_frame->mem_info;
+    if (NULL == memObj) {
+        ALOGE("%s : Memeory Obj of main frame is NULL", __func__);
+        return NO_MEMORY;
+    }
+
+    // clean and invalidate cache ops through mem obj of the frame
+    memObj->cleanInvalidateCache(main_frame->buf_idx);
+
+    if (mJpegClientHandle <= 0) {
+        ALOGE("%s: Error: bug here, mJpegClientHandle is 0", __func__);
+        return UNKNOWN_ERROR;
+    }
+    cam_dimension_t src_dim;
+    memset(&src_dim, 0, sizeof(cam_dimension_t));
+    main_stream->getFrameDimension(src_dim);
+
+    cam_dimension_t dst_dim;
+    memset(&dst_dim, 0, sizeof(cam_dimension_t));
+    if (NO_ERROR != m_parent->getStreamSize(dst_dim)) {
+        ALOGE("%s: Failed to get size of the JPEG stream", __func__);
+        return UNKNOWN_ERROR;
+    }
+
+    needJpegRotation = hal_obj->needJpegRotation();
+    CDBG_HIGH("%s: Need new session?:%d",__func__, needNewSess);
+    if (needNewSess) {
+        //creating a new session, so we must destroy the old one
+        if ( 0 < mJpegSessionId ) {
+            ret = mJpegHandle.destroy_session(mJpegSessionId);
+            if (ret != NO_ERROR) {
+                ALOGE("%s: Error destroying an old jpeg encoding session, id = %d",
+                      __func__, mJpegSessionId);
+                return ret;
+            }
+            mJpegSessionId = 0;
+        }
+        // create jpeg encoding session
+        mm_jpeg_encode_params_t encodeParam;
+        memset(&encodeParam, 0, sizeof(mm_jpeg_encode_params_t));
+        getJpegEncodeConfig(encodeParam, main_stream, jpeg_settings);
+        CDBG_HIGH("%s: #src bufs:%d # tmb bufs:%d #dst_bufs:%d", __func__,
+                     encodeParam.num_src_bufs,encodeParam.num_tmb_bufs,encodeParam.num_dst_bufs);
+        if (!needJpegRotation &&
+            (jpeg_settings->jpeg_orientation == 90 ||
+            jpeg_settings->jpeg_orientation == 270)) {
+           //swap src width and height, stride and scanline due to rotation
+           encodeParam.main_dim.src_dim.width = src_dim.height;
+           encodeParam.main_dim.src_dim.height = src_dim.width;
+           encodeParam.thumb_dim.src_dim.width = src_dim.height;
+           encodeParam.thumb_dim.src_dim.height = src_dim.width;
+
+           int32_t temp = encodeParam.src_main_buf[0].offset.mp[0].stride;
+           encodeParam.src_main_buf[0].offset.mp[0].stride =
+              encodeParam.src_main_buf[0].offset.mp[0].scanline;
+           encodeParam.src_main_buf[0].offset.mp[0].scanline = temp;
+
+           temp = encodeParam.src_thumb_buf[0].offset.mp[0].stride;
+           encodeParam.src_thumb_buf[0].offset.mp[0].stride =
+              encodeParam.src_thumb_buf[0].offset.mp[0].scanline;
+           encodeParam.src_thumb_buf[0].offset.mp[0].scanline = temp;
+        } else {
+           encodeParam.main_dim.src_dim  = src_dim;
+           encodeParam.thumb_dim.src_dim = src_dim;
+        }
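+        // Note: when the encoder does not perform the rotation itself
+        // (needJpegRotation == false), a 90/270 orientation implies the input
+        // has already been rotated upstream (see the thumbnail comment below),
+        // so width/height and stride/scanline are swapped to describe the
+        // rotated layout to mm-jpeg.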
+        encodeParam.main_dim.dst_dim = dst_dim;
+        encodeParam.thumb_dim.dst_dim = jpeg_settings->thumbnail_size;
+        if (needJpegRotation) {
+           encodeParam.rotation = (uint32_t)jpeg_settings->jpeg_orientation;
+        }
+
+        ret = mJpegHandle.create_session(mJpegClientHandle, &encodeParam, &mJpegSessionId);
+        if (ret != NO_ERROR) {
+            ALOGE("%s: Error creating a new jpeg encoding session, ret = %d", __func__, ret);
+            return ret;
+        }
+        needNewSess = FALSE;
+    }
+
+    // Fill in new job
+    memset(&jpg_job, 0, sizeof(mm_jpeg_job_t));
+    jpg_job.job_type = JPEG_JOB_TYPE_ENCODE;
+    jpg_job.encode_job.session_id = mJpegSessionId;
+    jpg_job.encode_job.src_index = (int32_t)main_frame->buf_idx;
+    jpg_job.encode_job.dst_index = 0;
+
+    if (needJpegRotation) {
+        jpg_job.encode_job.rotation = (uint32_t)jpeg_settings->jpeg_orientation;
+        CDBG("%s: %d: jpeg rotation is set to %d", __func__, __LINE__,
+                jpg_job.encode_job.rotation);
+    }
+
+    cam_rect_t crop;
+    memset(&crop, 0, sizeof(cam_rect_t));
+    //TBD_later - Zoom event removed in stream
+    //main_stream->getCropInfo(crop);
+
+    // Set main dim job parameters and handle rotation
+    if (!needJpegRotation && (jpeg_settings->jpeg_orientation == 90 ||
+            jpeg_settings->jpeg_orientation == 270)) {
+
+        jpg_job.encode_job.main_dim.src_dim.width = src_dim.height;
+        jpg_job.encode_job.main_dim.src_dim.height = src_dim.width;
+
+        jpg_job.encode_job.main_dim.dst_dim.width = dst_dim.height;
+        jpg_job.encode_job.main_dim.dst_dim.height = dst_dim.width;
+
+        jpg_job.encode_job.main_dim.crop.width = crop.height;
+        jpg_job.encode_job.main_dim.crop.height = crop.width;
+        jpg_job.encode_job.main_dim.crop.left = crop.top;
+        jpg_job.encode_job.main_dim.crop.top = crop.left;
+    } else {
+        jpg_job.encode_job.main_dim.src_dim = src_dim;
+        jpg_job.encode_job.main_dim.dst_dim = dst_dim;
+        jpg_job.encode_job.main_dim.crop = crop;
+    }
+
+    QCamera3HardwareInterface* obj = (QCamera3HardwareInterface*)m_parent->mUserData;
+    // get 3a sw version info
+    cam_q3a_version_t sw_version;
+    memset(&sw_version, 0, sizeof(sw_version));
+
+    if (obj)
+        obj->get3AVersion(sw_version);
+
+    // get exif data
+    QCamera3Exif *pJpegExifObj = getExifData(metadata, jpeg_settings);
+    jpeg_job_data->pJpegExifObj = pJpegExifObj;
+    if (pJpegExifObj != NULL) {
+        jpg_job.encode_job.exif_info.exif_data = pJpegExifObj->getEntries();
+        jpg_job.encode_job.exif_info.numOfEntries =
+            pJpegExifObj->getNumOfEntries();
+        jpg_job.encode_job.exif_info.debug_data.sw_3a_version[0] =
+            sw_version.major_version;
+        jpg_job.encode_job.exif_info.debug_data.sw_3a_version[1] =
+            sw_version.minor_version;
+        jpg_job.encode_job.exif_info.debug_data.sw_3a_version[2] =
+            sw_version.patch_version;
+        jpg_job.encode_job.exif_info.debug_data.sw_3a_version[3] =
+            sw_version.new_feature_des;
+    }
+
+    // thumbnail dim
+    CDBG_HIGH("%s: Thumbnail needed:%d",__func__, m_bThumbnailNeeded);
+    if (m_bThumbnailNeeded == TRUE) {
+        memset(&crop, 0, sizeof(cam_rect_t));
+        jpg_job.encode_job.thumb_dim.dst_dim =
+                jpeg_settings->thumbnail_size;
+
+      if (!needJpegRotation &&
+          (jpeg_settings->jpeg_orientation  == 90 ||
+           jpeg_settings->jpeg_orientation == 270)) {
+            //swap the thumbnail destination width and height if it has
+            //already been rotated
+            int temp = jpg_job.encode_job.thumb_dim.dst_dim.width;
+            jpg_job.encode_job.thumb_dim.dst_dim.width =
+                    jpg_job.encode_job.thumb_dim.dst_dim.height;
+            jpg_job.encode_job.thumb_dim.dst_dim.height = temp;
+
+            jpg_job.encode_job.thumb_dim.src_dim.width = src_dim.height;
+            jpg_job.encode_job.thumb_dim.src_dim.height = src_dim.width;
+        } else {
+           jpg_job.encode_job.thumb_dim.src_dim = src_dim;
+        }
+        jpg_job.encode_job.thumb_dim.crop = crop;
+        jpg_job.encode_job.thumb_index = main_frame->buf_idx;
+    }
+
+    jpg_job.encode_job.cam_exif_params = hal_obj->get3AExifParams();
+    jpg_job.encode_job.mobicat_mask = hal_obj->getMobicatMask();
+    if (metadata != NULL) {
+       //Fill in the metadata passed as parameter
+       jpg_job.encode_job.p_metadata = metadata;
+
+       jpg_job.encode_job.p_metadata->is_mobicat_aec_params_valid =
+                jpg_job.encode_job.cam_exif_params.cam_3a_params_valid;
+
+       if (jpg_job.encode_job.cam_exif_params.cam_3a_params_valid) {
+            jpg_job.encode_job.p_metadata->mobicat_aec_params =
+                jpg_job.encode_job.cam_exif_params.cam_3a_params;
+       }
+
+       /* Save a copy of 3A debug params */
+        jpg_job.encode_job.p_metadata->is_statsdebug_ae_params_valid =
+                jpg_job.encode_job.cam_exif_params.ae_debug_params_valid;
+        jpg_job.encode_job.p_metadata->is_statsdebug_awb_params_valid =
+                jpg_job.encode_job.cam_exif_params.awb_debug_params_valid;
+        jpg_job.encode_job.p_metadata->is_statsdebug_af_params_valid =
+                jpg_job.encode_job.cam_exif_params.af_debug_params_valid;
+        jpg_job.encode_job.p_metadata->is_statsdebug_asd_params_valid =
+                jpg_job.encode_job.cam_exif_params.asd_debug_params_valid;
+        jpg_job.encode_job.p_metadata->is_statsdebug_stats_params_valid =
+                jpg_job.encode_job.cam_exif_params.stats_debug_params_valid;
+
+        if (jpg_job.encode_job.cam_exif_params.ae_debug_params_valid) {
+            jpg_job.encode_job.p_metadata->statsdebug_ae_data =
+                    jpg_job.encode_job.cam_exif_params.ae_debug_params;
+        }
+        if (jpg_job.encode_job.cam_exif_params.awb_debug_params_valid) {
+            jpg_job.encode_job.p_metadata->statsdebug_awb_data =
+                    jpg_job.encode_job.cam_exif_params.awb_debug_params;
+        }
+        if (jpg_job.encode_job.cam_exif_params.af_debug_params_valid) {
+            jpg_job.encode_job.p_metadata->statsdebug_af_data =
+                    jpg_job.encode_job.cam_exif_params.af_debug_params;
+        }
+        if (jpg_job.encode_job.cam_exif_params.asd_debug_params_valid) {
+            jpg_job.encode_job.p_metadata->statsdebug_asd_data =
+                    jpg_job.encode_job.cam_exif_params.asd_debug_params;
+        }
+        if (jpg_job.encode_job.cam_exif_params.stats_debug_params_valid) {
+            jpg_job.encode_job.p_metadata->statsdebug_stats_buffer_data =
+                    jpg_job.encode_job.cam_exif_params.stats_debug_params;
+        }
+    } else {
+       ALOGE("%s: Metadata is null", __func__);
+    }
+
+    jpg_job.encode_job.hal_version = CAM_HAL_V3;
+
+    //Start jpeg encoding
+    ret = mJpegHandle.start_job(&jpg_job, &jobId);
+    if (ret == NO_ERROR) {
+        // remember job info
+        jpeg_job_data->jobId = jobId;
+    }
+
+    CDBG("%s : X", __func__);
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : dataProcessRoutine
+ *
+ * DESCRIPTION: data process routine that handles input data either from input
+ *              Jpeg Queue to do jpeg encoding, or from input PP Queue to do
+ *              reprocess.
+ *
+ * PARAMETERS :
+ *   @data    : user data ptr (QCamera3PostProcessor)
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void *QCamera3PostProcessor::dataProcessRoutine(void *data)
+{
+    int running = 1;
+    int ret;
+    uint8_t is_active = FALSE;
+    uint8_t needNewSess = TRUE;
+    mm_camera_super_buf_t *meta_buffer = NULL;
+    CDBG("%s: E", __func__);
+    QCamera3PostProcessor *pme = (QCamera3PostProcessor *)data;
+    QCameraCmdThread *cmdThread = &pme->m_dataProcTh;
+    cmdThread->setName("cam_data_proc");
+
+    do {
+        do {
+            ret = cam_sem_wait(&cmdThread->cmd_sem);
+            if (ret != 0 && errno != EINVAL) {
+                ALOGE("%s: cam_sem_wait error (%s)",
+                           __func__, strerror(errno));
+                return NULL;
+            }
+        } while (ret != 0);
+
+        // we got notified about new cmd avail in cmd queue
+        camera_cmd_type_t cmd = cmdThread->getCmd();
+        switch (cmd) {
+        case CAMERA_CMD_TYPE_START_DATA_PROC:
+            CDBG_HIGH("%s: start data proc", __func__);
+            is_active = TRUE;
+            needNewSess = TRUE;
+
+            pme->m_ongoingPPQ.init();
+            pme->m_inputJpegQ.init();
+            pme->m_inputPPQ.init();
+            pme->m_inputFWKPPQ.init();
+            pme->m_inputMetaQ.init();
+            cam_sem_post(&cmdThread->sync_sem);
+
+            break;
+        case CAMERA_CMD_TYPE_STOP_DATA_PROC:
+            {
+                CDBG_HIGH("%s: stop data proc", __func__);
+                is_active = FALSE;
+
+                // cancel all ongoing jpeg jobs
+                qcamera_hal3_jpeg_data_t *jpeg_job =
+                    (qcamera_hal3_jpeg_data_t *)pme->m_ongoingJpegQ.dequeue();
+                while (jpeg_job != NULL) {
+                    pme->mJpegHandle.abort_job(jpeg_job->jobId);
+
+                    pme->releaseJpegJobData(jpeg_job);
+                    free(jpeg_job);
+
+                    jpeg_job = (qcamera_hal3_jpeg_data_t *)pme->m_ongoingJpegQ.dequeue();
+                }
+
+                // destroy jpeg encoding session
+                if ( 0 < pme->mJpegSessionId ) {
+                    pme->mJpegHandle.destroy_session(pme->mJpegSessionId);
+                    pme->mJpegSessionId = 0;
+                }
+
+                needNewSess = TRUE;
+
+                // flush ongoing postproc Queue
+                pme->m_ongoingPPQ.flush();
+
+                // flush input jpeg Queue
+                pme->m_inputJpegQ.flush();
+
+                // flush input Postproc Queue
+                pme->m_inputPPQ.flush();
+
+                // flush framework input Postproc Queue
+                pme->m_inputFWKPPQ.flush();
+
+                pme->m_inputMetaQ.flush();
+
+                // signal cmd is completed
+                cam_sem_post(&cmdThread->sync_sem);
+            }
+            break;
+        case CAMERA_CMD_TYPE_DO_NEXT_JOB:
+            {
+                CDBG_HIGH("%s: Do next job, active is %d", __func__, is_active);
+                /* needNewSess is set to TRUE as postproc is not re-STARTed
+                 * anymore for every captureRequest */
+                needNewSess = TRUE;
+                if (is_active == TRUE) {
+                    // check if there is any ongoing jpeg jobs
+                    if (pme->m_ongoingJpegQ.isEmpty()) {
+                        CDBG("%s: ongoing jpeg queue is empty so doing the jpeg job", __func__);
+                        // no ongoing jpeg job, we are fine to send jpeg encoding job
+                        qcamera_hal3_jpeg_data_t *jpeg_job =
+                            (qcamera_hal3_jpeg_data_t *)pme->m_inputJpegQ.dequeue();
+
+                        if (NULL != jpeg_job) {
+                            // add into ongoing jpeg job Q
+                            pme->m_ongoingJpegQ.enqueue((void *)jpeg_job);
+
+                            if (jpeg_job->fwk_frame) {
+                                ret = pme->encodeFWKData(jpeg_job, needNewSess);
+                            } else {
+                                ret = pme->encodeData(jpeg_job, needNewSess);
+                            }
+                            if (NO_ERROR != ret) {
+                                // dequeue the last one
+                                pme->m_ongoingJpegQ.dequeue(false);
+
+                                pme->releaseJpegJobData(jpeg_job);
+                                free(jpeg_job);
+                            }
+                        }
+                    }
+
+                    // check if there are any framework pp jobs
+                    if (!pme->m_inputFWKPPQ.isEmpty()) {
+                        qcamera_fwk_input_pp_data_t *fwk_frame =
+                                (qcamera_fwk_input_pp_data_t *) pme->m_inputFWKPPQ.dequeue();
+                        if (NULL != fwk_frame) {
+                            qcamera_hal3_pp_data_t *pp_job =
+                                    (qcamera_hal3_pp_data_t *)malloc(sizeof(qcamera_hal3_pp_data_t));
+                            jpeg_settings_t *jpeg_settings =
+                                    (jpeg_settings_t *)pme->m_jpegSettingsQ.dequeue();
+                            if (pp_job != NULL) {
+                                memset(pp_job, 0, sizeof(qcamera_hal3_pp_data_t));
+                                pp_job->jpeg_settings = jpeg_settings;
+                                if (pme->m_pReprocChannel != NULL) {
+                                    if (NO_ERROR != pme->m_pReprocChannel->overrideFwkMetadata(fwk_frame)) {
+                                        ALOGE("%s: Failed to extract output crop", __func__);
+                                    }
+                                    // add into ongoing PP job Q
+                                    pp_job->fwk_src_frame = fwk_frame;
+                                    pme->m_ongoingPPQ.enqueue((void *)pp_job);
+                                    ret = pme->m_pReprocChannel->doReprocessOffline(fwk_frame);
+                                    if (NO_ERROR != ret) {
+                                        // remove from ongoing PP job Q
+                                        pme->m_ongoingPPQ.dequeue(false);
+                                    }
+                                } else {
+                                    ALOGE("%s: Reprocess channel is NULL", __func__);
+                                    ret = -1;
+                                }
+                            } else {
+                                ALOGE("%s: no mem for qcamera_hal3_pp_data_t", __func__);
+                                ret = -1;
+                            }
+
+                            if (0 != ret) {
+                                // free pp_job
+                                if (pp_job != NULL) {
+                                    free(pp_job);
+                                }
+                                // free frame
+                                if (fwk_frame != NULL) {
+                                    free(fwk_frame);
+                                }
+                            }
+                        }
+                    }
+
+                    CDBG_HIGH("%s: dequeuing pp frame", __func__);
+                    pthread_mutex_lock(&pme->mReprocJobLock);
+                    if(!pme->m_inputPPQ.isEmpty() && !pme->m_inputMetaQ.isEmpty()) {
+                        qcamera_hal3_pp_buffer_t *pp_buffer =
+                            (qcamera_hal3_pp_buffer_t *)pme->m_inputPPQ.dequeue();
+                        meta_buffer =
+                            (mm_camera_super_buf_t *)pme->m_inputMetaQ.dequeue();
+                        jpeg_settings_t *jpeg_settings =
+                           (jpeg_settings_t *)pme->m_jpegSettingsQ.dequeue();
+                        pthread_mutex_unlock(&pme->mReprocJobLock);
+                        qcamera_hal3_pp_data_t *pp_job =
+                            (qcamera_hal3_pp_data_t *)malloc(sizeof(qcamera_hal3_pp_data_t));
+                        if (pp_job == NULL) {
+                            ALOGE("%s: no mem for qcamera_hal3_pp_data_t",
+                                    __func__);
+                            ret = -1;
+                        } else if (meta_buffer == NULL) {
+                            ALOGE("%s: no mem for mm_camera_super_buf_t",
+                                    __func__);
+                            ret = -1;
+                        } else {
+                            memset(pp_job, 0, sizeof(qcamera_hal3_pp_data_t));
+                            pp_job->src_frame = pp_buffer->input;
+                            pp_job->src_metadata = meta_buffer;
+                            if (meta_buffer->bufs[0] != NULL) {
+                                pp_job->metadata = (metadata_buffer_t *)
+                                        meta_buffer->bufs[0]->buffer;
+                            }
+                            pp_job->jpeg_settings = jpeg_settings;
+                            pme->m_ongoingPPQ.enqueue((void *)pp_job);
+                            if (pme->m_pReprocChannel != NULL) {
+                                mm_camera_buf_def_t *meta_buffer_arg = NULL;
+                                meta_buffer_arg = meta_buffer->bufs[0];
+                                qcamera_fwk_input_pp_data_t fwk_frame;
+                                memset(&fwk_frame, 0, sizeof(qcamera_fwk_input_pp_data_t));
+                                fwk_frame.frameNumber = pp_buffer->frameNumber;
+                                ret = pme->m_pReprocChannel->overrideMetadata(
+                                        pp_buffer, meta_buffer_arg,
+                                        pp_job->jpeg_settings,
+                                        fwk_frame);
+                                if (NO_ERROR == ret) {
+                                    // add into ongoing PP job Q
+                                    ret = pme->m_pReprocChannel->doReprocessOffline(
+                                            &fwk_frame);
+                                    if (NO_ERROR != ret) {
+                                        // remove from ongoing PP job Q
+                                        pme->m_ongoingPPQ.dequeue(false);
+                                    }
+                                }
+                            } else {
+                                ALOGE("%s: No reprocess. Calling processPPData directly",
+                                    __func__);
+                                ret = pme->processPPData(pp_buffer->input);
+                            }
+                        }
+
+                        if (0 != ret) {
+                            // free pp_job
+                            if (pp_job != NULL) {
+                                free(pp_job);
+                            }
+                            // free frame
+                            if (pp_buffer != NULL) {
+                                if (pp_buffer->input) {
+                                    pme->releaseSuperBuf(pp_buffer->input);
+                                    free(pp_buffer->input);
+                                }
+                                free(pp_buffer);
+                            }
+                            //free metadata
+                            if (NULL != meta_buffer) {
+                                pme->m_parent->metadataBufDone(meta_buffer);
+                                free(meta_buffer);
+                            }
+                        } else {
+                            if (pp_buffer != NULL) {
+                                free(pp_buffer);
+                            }
+                        }
+                    } else {
+                        pthread_mutex_unlock(&pme->mReprocJobLock);
+                    }
+                } else {
+                    // not active, simply return buf and do no op
+                    qcamera_hal3_jpeg_data_t *jpeg_job =
+                        (qcamera_hal3_jpeg_data_t *)pme->m_inputJpegQ.dequeue();
+                    if (NULL != jpeg_job) {
+                        free(jpeg_job);
+                    }
+
+                    qcamera_hal3_pp_buffer_t* pp_buf =
+                            (qcamera_hal3_pp_buffer_t *)pme->m_inputPPQ.dequeue();
+                    if (NULL != pp_buf) {
+                        if (pp_buf->input) {
+                            pme->releaseSuperBuf(pp_buf->input);
+                            free(pp_buf->input);
+                            pp_buf->input = NULL;
+                        }
+                        free(pp_buf);
+                    }
+                    mm_camera_super_buf_t *metadata = (mm_camera_super_buf_t *)pme->m_inputMetaQ.dequeue();
+                    if (metadata != NULL) {
+                        pme->m_parent->metadataBufDone(metadata);
+                        free(metadata);
+                    }
+                    qcamera_fwk_input_pp_data_t *fwk_frame =
+                            (qcamera_fwk_input_pp_data_t *) pme->m_inputFWKPPQ.dequeue();
+                    if (NULL != fwk_frame) {
+                        free(fwk_frame);
+                    }
+                }
+            }
+            break;
+        case CAMERA_CMD_TYPE_EXIT:
+            running = 0;
+            break;
+        default:
+            break;
+        }
+    } while (running);
+    CDBG("%s: X", __func__);
+    return NULL;
+}
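
The routine above is a standard command-loop: the thread blocks on a semaphore, drains one command per wakeup, and treats START/STOP as gates around DO_NEXT_JOB work. A minimal standalone sketch of the same shape, using only the C++ standard library (CmdLoop, post() and run() are illustrative names, not part of this HAL):

    #include <condition_variable>
    #include <mutex>
    #include <queue>

    enum class Cmd { Start, Stop, DoNextJob, Exit };

    struct CmdLoop {
        std::mutex m;
        std::condition_variable cv;
        std::queue<Cmd> q;

        void post(Cmd c) {                        // producer side (any thread)
            { std::lock_guard<std::mutex> l(m); q.push(c); }
            cv.notify_one();
        }

        void run() {                              // consumer thread body
            bool active = false;
            for (;;) {
                std::unique_lock<std::mutex> l(m);
                cv.wait(l, [&] { return !q.empty(); });
                Cmd c = q.front();
                q.pop();
                l.unlock();
                switch (c) {
                case Cmd::Start:     active = true;  break;  // init job queues here
                case Cmd::Stop:      active = false; break;  // abort/flush jobs here
                case Cmd::DoNextJob: if (active) { /* encode or reprocess one queued job */ } break;
                case Cmd::Exit:      return;
                }
            }
        }
    };
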
+
+/* EXIF related helper methods */
+
+/*===========================================================================
+ * FUNCTION   : getRational
+ *
+ * DESCRIPTION: compose rational struct
+ *
+ * PARAMETERS :
+ *   @rat     : ptr to struct to store rational info
+ *   @num     : numerator of the rational
+ *   @denom   : denominator of the rational
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t getRational(rat_t *rat, int num, int denom)
+{
+    if ((0 > num) || (0 >= denom)) {
+        ALOGE("%s: Negative values", __func__);
+        return BAD_VALUE;
+    }
+    if (NULL == rat) {
+        ALOGE("%s: NULL rat input", __func__);
+        return BAD_VALUE;
+    }
+    rat->num = (uint32_t)num;
+    rat->denom = (uint32_t)denom;
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : parseGPSCoordinate
+ *
+ * DESCRIPTION: parse GPS coordinate string
+ *
+ * PARAMETERS :
+ *   @coord_str : [input] coordinate string
+ *   @coord     : [output]  ptr to struct to store coordinate
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int parseGPSCoordinate(const char *coord_str, rat_t* coord)
+{
+    if(coord == NULL) {
+        ALOGE("%s: error, invalid argument coord == NULL", __func__);
+        return BAD_VALUE;
+    }
+    double degF = atof(coord_str);
+    if (degF < 0) {
+        degF = -degF;
+    }
+    double minF = (degF - (int) degF) * 60;
+    double secF = (minF - (int) minF) * 60;
+
+    getRational(&coord[0], (int)degF, 1);
+    getRational(&coord[1], (int)minF, 1);
+    getRational(&coord[2], (int)(secF * 10000), 10000);
+    return NO_ERROR;
+}
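
parseGPSCoordinate() splits a signed decimal-degree string into the three EXIF GPS rationals (degrees/1, minutes/1, seconds*10000/10000); the sign itself is carried by the separate N/S or E/W reference tag. A standalone sketch of the same arithmetic (GpsRational and toDms() are illustrative, not the HAL's rat_t/API):

    #include <math.h>
    #include <stdio.h>

    struct GpsRational { unsigned num, denom; };

    /* Convert an absolute decimal-degree value into degrees/1, minutes/1 and
     * (seconds * 10000)/10000, mirroring the split performed above. */
    static void toDms(double deg, struct GpsRational out[3])
    {
        deg = fabs(deg);
        double minutes = (deg - (int)deg) * 60.0;
        double seconds = (minutes - (int)minutes) * 60.0;
        out[0].num = (unsigned)deg;                  out[0].denom = 1;
        out[1].num = (unsigned)minutes;              out[1].denom = 1;
        out[2].num = (unsigned)(seconds * 10000.0);  out[2].denom = 10000;
    }

    int main(void)
    {
        struct GpsRational dms[3];
        toDms(-37.422, dms);     /* sign dropped; the reference tag would be 'S' */
        printf("%u/%u %u/%u %u/%u\n",
                dms[0].num, dms[0].denom, dms[1].num, dms[1].denom,
                dms[2].num, dms[2].denom);   /* ~ 37/1 25/1 192000/10000 */
        return 0;
    }
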
+
+/*===========================================================================
+ * FUNCTION   : getExifDateTime
+ *
+ * DESCRIPTION: query exif date time
+ *
+ * PARAMETERS :
+ *   @dateTime   : string to store exif date time
+ *   @subsecTime : string to store exif subsec time
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t getExifDateTime(String8 &dateTime, String8 &subsecTime)
+{
+    int32_t ret = NO_ERROR;
+
+    //get time and date from system
+    struct timeval tv;
+    struct tm timeinfo_data;
+
+    int res = gettimeofday(&tv, NULL);
+    if (0 == res) {
+        struct tm *timeinfo = localtime_r(&tv.tv_sec, &timeinfo_data);
+        if (NULL != timeinfo) {
+            //Write datetime according to EXIF Spec
+            //"YYYY:MM:DD HH:MM:SS" (20 chars including \0)
+            dateTime = String8::format("%04d:%02d:%02d %02d:%02d:%02d",
+                    timeinfo->tm_year + 1900, timeinfo->tm_mon + 1,
+                    timeinfo->tm_mday, timeinfo->tm_hour,
+                    timeinfo->tm_min, timeinfo->tm_sec);
+            //Write subsec according to EXIF Spec
+            subsecTime = String8::format("%06ld", tv.tv_usec);
+        } else {
+            ALOGE("%s: localtime_r() error", __func__);
+            ret = UNKNOWN_ERROR;
+        }
+    } else if (-1 == res) {
+        ALOGE("%s: gettimeofday() error: %s", __func__, strerror(errno));
+        ret = UNKNOWN_ERROR;
+    } else {
+        ALOGE("%s: gettimeofday() unexpected return code: %d", __func__, res);
+        ret = UNKNOWN_ERROR;
+    }
+
+    return ret;
+}
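
The formatting above follows the EXIF DateTime convention of a 19-character "YYYY:MM:DD HH:MM:SS" string plus the terminating NUL. A condensed standalone sketch of the same formatting (exifDateTime() is an illustrative helper; assumes POSIX localtime_r):

    #include <stdio.h>
    #include <time.h>

    /* Produce the 20-byte EXIF DateTime string for a given time_t. */
    static void exifDateTime(time_t t, char out[20])
    {
        struct tm tmv;
        localtime_r(&t, &tmv);
        snprintf(out, 20, "%04d:%02d:%02d %02d:%02d:%02d",
                tmv.tm_year + 1900, tmv.tm_mon + 1, tmv.tm_mday,
                tmv.tm_hour, tmv.tm_min, tmv.tm_sec);
    }
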
+
+/*===========================================================================
+ * FUNCTION   : getExifFocalLength
+ *
+ * DESCRIPTION: get exif focal length
+ *
+ * PARAMETERS :
+ *   @focalLength : ptr to rational struct to store focal length
+ *   @value       : focal length value
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t getExifFocalLength(rat_t *focalLength, float value)
+{
+    int focalLengthValue =
+        (int)(value * FOCAL_LENGTH_DECIMAL_PRECISION);
+    return getRational(focalLength, focalLengthValue, FOCAL_LENGTH_DECIMAL_PRECISION);
+}
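
FOCAL_LENGTH_DECIMAL_PRECISION fixes how many decimal places survive the rational encoding; assuming the common value of 100, a 4.67 mm lens would be written as 467/100. Illustrative sketch (ExifRational and encodeFocalLength() are not HAL names):

    struct ExifRational { unsigned num, denom; };

    /* e.g. encodeFocalLength(4.67f, 100) -> 467/100 */
    static ExifRational encodeFocalLength(float mm, unsigned precision)
    {
        return ExifRational{ (unsigned)(mm * (float)precision), precision };
    }
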
+
+/*===========================================================================
+ * FUNCTION   : getExifExpTimeInfo
+ *
+ * DESCRIPTION: get exif exposure time information
+ *
+ * PARAMETERS :
+ *   @expoTimeInfo     : rational exposure time value
+ *   @value            : exposure time value
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t getExifExpTimeInfo(rat_t *expoTimeInfo, int64_t value)
+{
+
+    int64_t cal_exposureTime;
+    if (value != 0)
+        cal_exposureTime = value;
+    else
+        cal_exposureTime = 60;
+
+    return getRational(expoTimeInfo, 1, (int)cal_exposureTime);
+}
+
+/*===========================================================================
+ * FUNCTION   : getExifGpsProcessingMethod
+ *
+ * DESCRIPTION: get GPS processing method
+ *
+ * PARAMETERS :
+ *   @gpsProcessingMethod : string to store GPS process method
+ *   @count               : length of the string
+ *   @value               : the value of the processing method
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t getExifGpsProcessingMethod(char *gpsProcessingMethod,
+        uint32_t &count, char* value)
+{
+    if(value != NULL) {
+        memcpy(gpsProcessingMethod, ExifAsciiPrefix, EXIF_ASCII_PREFIX_SIZE);
+        count = EXIF_ASCII_PREFIX_SIZE;
+        strlcpy(gpsProcessingMethod + EXIF_ASCII_PREFIX_SIZE,
+                value,
+                strlen(value)+1);
+        count += (uint32_t)strlen(value);
+        gpsProcessingMethod[count++] = '\0'; // increase 1 for the last NULL char
+        return NO_ERROR;
+    } else {
+        return BAD_VALUE;
+    }
+}
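
getExifGpsProcessingMethod() prepends the 8-byte EXIF character-code header to the method string and reports the total length including the trailing NUL. A standalone sketch of that layout, assuming the usual "ASCII\0\0\0" prefix (buildGpsMethod() is illustrative):

    #include <string.h>

    /*   | 'A' 'S' 'C' 'I' 'I'  0   0   0 | method string ... | 0 |
     *   |<------- prefix (8 bytes) ----->|<- strlen(method) >|NUL|
     * so the reported count is 8 + strlen(method) + 1.
     * dst must hold at least 8 + strlen(method) + 1 bytes. */
    static size_t buildGpsMethod(char *dst, const char *method)
    {
        static const char kPrefix[8] = { 'A', 'S', 'C', 'I', 'I', 0, 0, 0 };
        memcpy(dst, kPrefix, sizeof(kPrefix));
        strcpy(dst + sizeof(kPrefix), method);
        return sizeof(kPrefix) + strlen(method) + 1;
    }
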
+
+/*===========================================================================
+ * FUNCTION   : getExifLatitude
+ *
+ * DESCRIPTION: get exif latitude
+ *
+ * PARAMETERS :
+ *   @latitude : ptr to rational struct to store latitude info
+ *   @latRef   : character to indicate latitude reference
+ *   @value    : value of the latitude
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t getExifLatitude(rat_t *latitude, char *latRef, double value)
+{
+    char str[30];
+    snprintf(str, sizeof(str), "%f", value);
+    if(str != NULL) {
+        parseGPSCoordinate(str, latitude);
+
+        //set Latitude Ref
+        float latitudeValue = strtof(str, 0);
+        if(latitudeValue < 0.0f) {
+            latRef[0] = 'S';
+        } else {
+            latRef[0] = 'N';
+        }
+        latRef[1] = '\0';
+        return NO_ERROR;
+    }else{
+        return BAD_VALUE;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getExifLongitude
+ *
+ * DESCRIPTION: get exif longitude
+ *
+ * PARAMETERS :
+ *   @longitude : ptr to rational struct to store longitude info
+ *   @lonRef    : character to indicate longitude reference
+ *   @value     : value of the longitude
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t getExifLongitude(rat_t *longitude, char *lonRef, double value)
+{
+    char str[30];
+    snprintf(str, sizeof(str), "%f", value);
+    if(str != NULL) {
+        parseGPSCoordinate(str, longitude);
+
+        //set Longitude Ref
+        float longitudeValue = strtof(str, 0);
+        if(longitudeValue < 0.0f) {
+            lonRef[0] = 'W';
+        } else {
+            lonRef[0] = 'E';
+        }
+        lonRef[1] = '\0';
+        return NO_ERROR;
+    }else{
+        return BAD_VALUE;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getExifAltitude
+ *
+ * DESCRIPTION: get exif altitude
+ *
+ * PARAMETERS :
+ *   @altitude : ptr to rational struct to store altitude info
+ *   @altRef   : character to indicate altitude reference
+ *   @argValue : altitude value
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t getExifAltitude(rat_t *altitude, char *altRef, double argValue)
+{
+    char str[30];
+    snprintf(str, sizeof(str), "%f", argValue);
+    if (str != NULL) {
+        double value = atof(str);
+        *altRef = 0;
+        if(value < 0){
+            *altRef = 1;
+            value = -value;
+        }
+        return getRational(altitude, (int)(value * 1000), 1000);
+    } else {
+        return BAD_VALUE;
+    }
+}
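
Per the EXIF spec, GPSAltitudeRef is 0 for above sea level and 1 for below; the code above stores the magnitude with a fixed denominator of 1000. Illustrative standalone sketch (ExifRational and encodeAltitude() are not HAL names):

    struct ExifRational { unsigned num, denom; };

    /* e.g. -12.345 m -> ref 1, altitude ~ 12345/1000
     *       431.2  m -> ref 0, altitude ~ 431200/1000 */
    static ExifRational encodeAltitude(double meters, unsigned char *ref)
    {
        *ref = (meters < 0) ? 1 : 0;
        if (meters < 0)
            meters = -meters;
        return ExifRational{ (unsigned)(meters * 1000), 1000 };
    }
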
+
+/*===========================================================================
+ * FUNCTION   : getExifGpsDateTimeStamp
+ *
+ * DESCRIPTION: get exif GPS date time stamp
+ *
+ * PARAMETERS :
+ *   @gpsDateStamp : GPS date time stamp string
+ *   @bufLen       : length of the string
+ *   @gpsTimeStamp : ptr to rational struct to store time stamp info
+ *   @value        : timestamp value
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t getExifGpsDateTimeStamp(char *gpsDateStamp, uint32_t bufLen,
+        rat_t *gpsTimeStamp, int64_t value)
+{
+    char str[30];
+    snprintf(str, sizeof(str), "%lld", (long long int)value);
+    if(str != NULL) {
+        time_t unixTime = (time_t)atol(str);
+        struct tm *UTCTimestamp = gmtime(&unixTime);
+        if (UTCTimestamp != NULL && gpsDateStamp != NULL
+                && gpsTimeStamp != NULL) {
+            strftime(gpsDateStamp, bufLen, "%Y:%m:%d", UTCTimestamp);
+
+            getRational(&gpsTimeStamp[0], UTCTimestamp->tm_hour, 1);
+            getRational(&gpsTimeStamp[1], UTCTimestamp->tm_min, 1);
+            getRational(&gpsTimeStamp[2], UTCTimestamp->tm_sec, 1);
+            return NO_ERROR;
+        } else {
+            ALOGE("%s: Could not get the timestamp", __func__);
+            return BAD_VALUE;
+        }
+    } else {
+        return BAD_VALUE;
+    }
+}
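
The GPS timestamp is a UNIX epoch value that gets split into a UTC "YYYY:MM:DD" date string and hour/minute/second values, which the code above stores as rationals with denominator 1. A standalone sketch (splitGpsTimestamp() is illustrative and uses the re-entrant gmtime_r rather than gmtime):

    #include <time.h>

    static void splitGpsTimestamp(time_t t, char date[11], unsigned hms[3])
    {
        struct tm utc;
        gmtime_r(&t, &utc);                     /* thread-safe UTC conversion */
        strftime(date, 11, "%Y:%m:%d", &utc);
        hms[0] = (unsigned)utc.tm_hour;
        hms[1] = (unsigned)utc.tm_min;
        hms[2] = (unsigned)utc.tm_sec;
    }
    /* e.g. t = 0 -> date "1970:01:01", hms = {0, 0, 0} */
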
+
+/*===========================================================================
+ * FUNCTION   : getExifExposureValue
+ *
+ * DESCRIPTION: get exif exposure compensation value
+ *
+ * PARAMETERS :
+ *   @exposure_val        : rational exposure value
+ *   @exposure_comp       : exposure compensation
+ *   @step                : exposure step
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t getExifExposureValue(srat_t* exposure_val, int32_t exposure_comp,
+        cam_rational_type_t step)
+{
+    exposure_val->num = exposure_comp * step.numerator;
+    exposure_val->denom = step.denominator;
+    return 0;
+}
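
The exposure bias is simply the compensation index scaled by the EV step: for example, index -2 with a 1/3 EV step yields -2/3 EV. Illustrative sketch (ExifSRational and exposureBias() are not HAL names):

    struct ExifSRational { int num; unsigned denom; };

    /* ExposureBiasValue = compensation index * EV step,
     * e.g. exposureBias(-2, 1, 3) -> -2/3 EV. */
    static ExifSRational exposureBias(int compIndex, int stepNum, unsigned stepDenom)
    {
        return ExifSRational{ compIndex * stepNum, stepDenom };
    }
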
+
+/*===========================================================================
+ * FUNCTION   : getExifData
+ *
+ * DESCRIPTION: get exif data to be passed into jpeg encoding
+ *
+ * PARAMETERS :
+ * @metadata      : metadata of the encoding request
+ * @jpeg_settings : jpeg_settings for encoding
+ *
+ * RETURN     : exif data from user setting and GPS
+ *==========================================================================*/
+QCamera3Exif *QCamera3PostProcessor::getExifData(metadata_buffer_t *metadata,
+        jpeg_settings_t *jpeg_settings)
+{
+    QCamera3Exif *exif = new QCamera3Exif();
+    if (exif == NULL) {
+        ALOGE("%s: No memory for QCamera3Exif", __func__);
+        return NULL;
+    }
+
+    int32_t rc = NO_ERROR;
+    uint32_t count = 0;
+
+    // add exif entries
+    String8 dateTime;
+    String8 subsecTime;
+    rc = getExifDateTime(dateTime, subsecTime);
+    if (rc == NO_ERROR) {
+        exif->addEntry(EXIFTAGID_DATE_TIME, EXIF_ASCII,
+                (uint32_t)(dateTime.length() + 1), (void *)dateTime.string());
+        exif->addEntry(EXIFTAGID_EXIF_DATE_TIME_ORIGINAL, EXIF_ASCII,
+                (uint32_t)(dateTime.length() + 1), (void *)dateTime.string());
+        exif->addEntry(EXIFTAGID_EXIF_DATE_TIME_DIGITIZED, EXIF_ASCII,
+                (uint32_t)(dateTime.length() + 1), (void *)dateTime.string());
+        exif->addEntry(EXIFTAGID_SUBSEC_TIME, EXIF_ASCII,
+                (uint32_t)(subsecTime.length() + 1), (void *)subsecTime.string());
+        exif->addEntry(EXIFTAGID_SUBSEC_TIME_ORIGINAL, EXIF_ASCII,
+                (uint32_t)(subsecTime.length() + 1), (void *)subsecTime.string());
+        exif->addEntry(EXIFTAGID_SUBSEC_TIME_DIGITIZED, EXIF_ASCII,
+                (uint32_t)(subsecTime.length() + 1), (void *)subsecTime.string());
+    } else {
+        ALOGE("%s: getExifDateTime failed", __func__);
+    }
+
+
+    if (metadata != NULL) {
+        IF_META_AVAILABLE(float, focal_length, CAM_INTF_META_LENS_FOCAL_LENGTH, metadata) {
+            rat_t focalLength;
+            rc = getExifFocalLength(&focalLength, *focal_length);
+            if (rc == NO_ERROR) {
+                exif->addEntry(EXIFTAGID_FOCAL_LENGTH,
+                        EXIF_RATIONAL,
+                        1,
+                        (void *)&(focalLength));
+            } else {
+                ALOGE("%s: getExifFocalLength failed", __func__);
+            }
+        }
+
+        IF_META_AVAILABLE(int32_t, isoSpeed, CAM_INTF_META_SENSOR_SENSITIVITY, metadata) {
+            int16_t fwk_isoSpeed = (int16_t) *isoSpeed;
+            exif->addEntry(EXIFTAGID_ISO_SPEED_RATING, EXIF_SHORT, 1, (void *) &(fwk_isoSpeed));
+        }
+
+
+        IF_META_AVAILABLE(int64_t, sensor_exposure_time,
+                CAM_INTF_META_SENSOR_EXPOSURE_TIME, metadata) {
+            rat_t sensorExpTime;
+            rc = getExifExpTimeInfo(&sensorExpTime, *sensor_exposure_time);
+            if (rc == NO_ERROR){
+                exif->addEntry(EXIFTAGID_EXPOSURE_TIME,
+                        EXIF_RATIONAL,
+                        1,
+                        (void *)&(sensorExpTime));
+            } else {
+                ALOGE("%s: getExifExpTimeInfo failed", __func__);
+            }
+        }
+
+        char* jpeg_gps_processing_method = jpeg_settings->gps_processing_method;
+        if (strlen(jpeg_gps_processing_method) > 0) {
+            char gpsProcessingMethod[EXIF_ASCII_PREFIX_SIZE +
+                    GPS_PROCESSING_METHOD_SIZE];
+            count = 0;
+            rc = getExifGpsProcessingMethod(gpsProcessingMethod,
+                    count,
+                    jpeg_gps_processing_method);
+            if(rc == NO_ERROR) {
+                exif->addEntry(EXIFTAGID_GPS_PROCESSINGMETHOD,
+                        EXIF_ASCII,
+                        count,
+                        (void *)gpsProcessingMethod);
+            } else {
+                ALOGE("%s: getExifGpsProcessingMethod failed", __func__);
+            }
+        }
+
+        if (jpeg_settings->gps_coordinates_valid) {
+
+            //latitude
+            rat_t latitude[3];
+            char latRef[2];
+            rc = getExifLatitude(latitude, latRef,
+                    jpeg_settings->gps_coordinates[0]);
+            if(rc == NO_ERROR) {
+                exif->addEntry(EXIFTAGID_GPS_LATITUDE,
+                        EXIF_RATIONAL,
+                        3,
+                        (void *)latitude);
+                exif->addEntry(EXIFTAGID_GPS_LATITUDE_REF,
+                        EXIF_ASCII,
+                        2,
+                        (void *)latRef);
+            } else {
+                ALOGE("%s: getExifLatitude failed", __func__);
+            }
+
+            //longitude
+            rat_t longitude[3];
+            char lonRef[2];
+            rc = getExifLongitude(longitude, lonRef,
+                    jpeg_settings->gps_coordinates[1]);
+            if(rc == NO_ERROR) {
+                exif->addEntry(EXIFTAGID_GPS_LONGITUDE,
+                        EXIF_RATIONAL,
+                        3,
+                        (void *)longitude);
+
+                exif->addEntry(EXIFTAGID_GPS_LONGITUDE_REF,
+                        EXIF_ASCII,
+                        2,
+                        (void *)lonRef);
+            } else {
+                ALOGE("%s: getExifLongitude failed", __func__);
+            }
+
+            //altitude
+            rat_t altitude;
+            char altRef;
+            rc = getExifAltitude(&altitude, &altRef,
+                    jpeg_settings->gps_coordinates[2]);
+            if(rc == NO_ERROR) {
+                exif->addEntry(EXIFTAGID_GPS_ALTITUDE,
+                        EXIF_RATIONAL,
+                        1,
+                        (void *)&(altitude));
+
+                exif->addEntry(EXIFTAGID_GPS_ALTITUDE_REF,
+                        EXIF_BYTE,
+                        1,
+                        (void *)&altRef);
+            } else {
+                ALOGE("%s: getExifAltitude failed", __func__);
+            }
+        }
+
+        if (jpeg_settings->gps_timestamp_valid) {
+
+            char gpsDateStamp[20];
+            rat_t gpsTimeStamp[3];
+            rc = getExifGpsDateTimeStamp(gpsDateStamp, 20, gpsTimeStamp,
+                    jpeg_settings->gps_timestamp);
+            if(rc == NO_ERROR) {
+                exif->addEntry(EXIFTAGID_GPS_DATESTAMP, EXIF_ASCII,
+                        (uint32_t)(strlen(gpsDateStamp) + 1),
+                        (void *)gpsDateStamp);
+
+                exif->addEntry(EXIFTAGID_GPS_TIMESTAMP,
+                        EXIF_RATIONAL,
+                        3,
+                        (void *)gpsTimeStamp);
+            } else {
+                ALOGE("%s: getExifGpsDataTimeStamp failed", __func__);
+            }
+        }
+
+        IF_META_AVAILABLE(int32_t, exposure_comp, CAM_INTF_PARM_EXPOSURE_COMPENSATION, metadata) {
+            IF_META_AVAILABLE(cam_rational_type_t, comp_step, CAM_INTF_PARM_EV_STEP, metadata) {
+                srat_t exposure_val;
+                rc = getExifExposureValue(&exposure_val, *exposure_comp, *comp_step);
+                if(rc == NO_ERROR) {
+                    exif->addEntry(EXIFTAGID_EXPOSURE_BIAS_VALUE,
+                            EXIF_SRATIONAL,
+                            1,
+                            (void *)(&exposure_val));
+                } else {
+                    ALOGE("%s: getExifExposureValue failed ", __func__);
+                }
+            }
+        }
+    } else {
+        ALOGE("%s: no metadata provided ", __func__);
+    }
+
+    bool output_image_desc = true;
+
+#ifdef ENABLE_MODEL_INFO_EXIF
+
+    char value[PROPERTY_VALUE_MAX];
+    if (property_get("ro.product.manufacturer", value, "QCOM-AA") > 0) {
+        exif->addEntry(EXIFTAGID_MAKE, EXIF_ASCII,
+                (uint32_t)(strlen(value) + 1), (void *)value);
+    } else {
+        ALOGE("%s: getExifMaker failed", __func__);
+    }
+
+    if (property_get("ro.product.model", value, "QCAM-AA") > 0) {
+        exif->addEntry(EXIFTAGID_MODEL, EXIF_ASCII,
+                (uint32_t)(strlen(value) + 1), (void *)value);
+    } else {
+        ALOGE("%s: getExifModel failed", __func__);
+    }
+
+    if (property_get("ro.build.description", value, "QCAM-AA") > 0) {
+        exif->addEntry(EXIFTAGID_SOFTWARE, EXIF_ASCII,
+                (uint32_t)(strlen(value) + 1), (void *)value);
+    } else {
+        ALOGE("%s: getExifSoftware failed", __func__);
+    }
+
+    // Production sw should not enable image description field output
+    output_image_desc = false;
+#endif
+
+    if (jpeg_settings->image_desc_valid && output_image_desc) {
+        if (exif->addEntry(EXIFTAGID_IMAGE_DESCRIPTION, EXIF_ASCII,
+                strlen(jpeg_settings->image_desc)+1,
+                (void *)jpeg_settings->image_desc)) {
+            ALOGE("%s: Adding IMAGE_DESCRIPTION tag failed", __func__);
+        }
+    }
+    return exif;
+}
+
+/*===========================================================================
+ * FUNCTION   : QCamera3Exif
+ *
+ * DESCRIPTION: constructor of QCamera3Exif
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCamera3Exif::QCamera3Exif()
+    : m_nNumEntries(0)
+{
+    memset(m_Entries, 0, sizeof(m_Entries));
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCamera3Exif
+ *
+ * DESCRIPTION: destructor of QCamera3Exif. Will release internal memory ptr.
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCamera3Exif::~QCamera3Exif()
+{
+    for (uint32_t i = 0; i < m_nNumEntries; i++) {
+        switch (m_Entries[i].tag_entry.type) {
+            case EXIF_BYTE:
+                {
+                    if (m_Entries[i].tag_entry.count > 1 &&
+                            m_Entries[i].tag_entry.data._bytes != NULL) {
+                        free(m_Entries[i].tag_entry.data._bytes);
+                        m_Entries[i].tag_entry.data._bytes = NULL;
+                    }
+                }
+                break;
+            case EXIF_ASCII:
+                {
+                    if (m_Entries[i].tag_entry.data._ascii != NULL) {
+                        free(m_Entries[i].tag_entry.data._ascii);
+                        m_Entries[i].tag_entry.data._ascii = NULL;
+                    }
+                }
+                break;
+            case EXIF_SHORT:
+                {
+                    if (m_Entries[i].tag_entry.count > 1 &&
+                            m_Entries[i].tag_entry.data._shorts != NULL) {
+                        free(m_Entries[i].tag_entry.data._shorts);
+                        m_Entries[i].tag_entry.data._shorts = NULL;
+                    }
+                }
+                break;
+            case EXIF_LONG:
+                {
+                    if (m_Entries[i].tag_entry.count > 1 &&
+                            m_Entries[i].tag_entry.data._longs != NULL) {
+                        free(m_Entries[i].tag_entry.data._longs);
+                        m_Entries[i].tag_entry.data._longs = NULL;
+                    }
+                }
+                break;
+            case EXIF_RATIONAL:
+                {
+                    if (m_Entries[i].tag_entry.count > 1 &&
+                            m_Entries[i].tag_entry.data._rats != NULL) {
+                        free(m_Entries[i].tag_entry.data._rats);
+                        m_Entries[i].tag_entry.data._rats = NULL;
+                    }
+                }
+                break;
+            case EXIF_UNDEFINED:
+                {
+                    if (m_Entries[i].tag_entry.data._undefined != NULL) {
+                        free(m_Entries[i].tag_entry.data._undefined);
+                        m_Entries[i].tag_entry.data._undefined = NULL;
+                    }
+                }
+                break;
+            case EXIF_SLONG:
+                {
+                    if (m_Entries[i].tag_entry.count > 1 &&
+                            m_Entries[i].tag_entry.data._slongs != NULL) {
+                        free(m_Entries[i].tag_entry.data._slongs);
+                        m_Entries[i].tag_entry.data._slongs = NULL;
+                    }
+                }
+                break;
+            case EXIF_SRATIONAL:
+                {
+                    if (m_Entries[i].tag_entry.count > 1 &&
+                            m_Entries[i].tag_entry.data._srats != NULL) {
+                        free(m_Entries[i].tag_entry.data._srats);
+                        m_Entries[i].tag_entry.data._srats = NULL;
+                    }
+                }
+                break;
+            default:
+                ALOGE("%s: Error, Unknown type",__func__);
+                break;
+        }
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : addEntry
+ *
+ * DESCRIPTION: function to add an entry to exif data
+ *
+ * PARAMETERS :
+ *   @tagid   : exif tag ID
+ *   @type    : data type
+ *   @count   : number of data elements of the given type
+ *   @data    : input data ptr
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Exif::addEntry(exif_tag_id_t tagid,
+                              exif_tag_type_t type,
+                              uint32_t count,
+                              void *data)
+{
+    int32_t rc = NO_ERROR;
+    if(m_nNumEntries >= MAX_HAL3_EXIF_TABLE_ENTRIES) {
+        ALOGE("%s: Number of entries exceeded limit", __func__);
+        return NO_MEMORY;
+    }
+
+    m_Entries[m_nNumEntries].tag_id = tagid;
+    m_Entries[m_nNumEntries].tag_entry.type = type;
+    m_Entries[m_nNumEntries].tag_entry.count = count;
+    m_Entries[m_nNumEntries].tag_entry.copy = 1;
+    switch (type) {
+        case EXIF_BYTE:
+            {
+                if (count > 1) {
+                    uint8_t *values = (uint8_t *)malloc(count);
+                    if (values == NULL) {
+                        ALOGE("%s: No memory for byte array", __func__);
+                        rc = NO_MEMORY;
+                    } else {
+                        memcpy(values, data, count);
+                        m_Entries[m_nNumEntries].tag_entry.data._bytes = values;
+                    }
+                } else {
+                    m_Entries[m_nNumEntries].tag_entry.data._byte =
+                        *(uint8_t *)data;
+                }
+            }
+            break;
+        case EXIF_ASCII:
+            {
+                char *str = NULL;
+                str = (char *)malloc(count + 1);
+                if (str == NULL) {
+                    ALOGE("%s: No memory for ascii string", __func__);
+                    rc = NO_MEMORY;
+                } else {
+                    memset(str, 0, count + 1);
+                    memcpy(str, data, count);
+                    m_Entries[m_nNumEntries].tag_entry.data._ascii = str;
+                }
+            }
+            break;
+        case EXIF_SHORT:
+            {
+                if (count > 1) {
+                    uint16_t *values =
+                        (uint16_t *)malloc(count * sizeof(uint16_t));
+                    if (values == NULL) {
+                        ALOGE("%s: No memory for short array", __func__);
+                        rc = NO_MEMORY;
+                    } else {
+                        memcpy(values, data, count * sizeof(uint16_t));
+                        m_Entries[m_nNumEntries].tag_entry.data._shorts =values;
+                    }
+                } else {
+                    m_Entries[m_nNumEntries].tag_entry.data._short =
+                        *(uint16_t *)data;
+                }
+            }
+            break;
+        case EXIF_LONG:
+            {
+                if (count > 1) {
+                    uint32_t *values =
+                        (uint32_t *)malloc(count * sizeof(uint32_t));
+                    if (values == NULL) {
+                        ALOGE("%s: No memory for long array", __func__);
+                        rc = NO_MEMORY;
+                    } else {
+                        memcpy(values, data, count * sizeof(uint32_t));
+                        m_Entries[m_nNumEntries].tag_entry.data._longs = values;
+                    }
+                } else {
+                    m_Entries[m_nNumEntries].tag_entry.data._long =
+                        *(uint32_t *)data;
+                }
+            }
+            break;
+        case EXIF_RATIONAL:
+            {
+                if (count > 1) {
+                    rat_t *values = (rat_t *)malloc(count * sizeof(rat_t));
+                    if (values == NULL) {
+                        ALOGE("%s: No memory for rational array", __func__);
+                        rc = NO_MEMORY;
+                    } else {
+                        memcpy(values, data, count * sizeof(rat_t));
+                        m_Entries[m_nNumEntries].tag_entry.data._rats = values;
+                    }
+                } else {
+                    m_Entries[m_nNumEntries].tag_entry.data._rat =
+                        *(rat_t *)data;
+                }
+            }
+            break;
+        case EXIF_UNDEFINED:
+            {
+                uint8_t *values = (uint8_t *)malloc(count);
+                if (values == NULL) {
+                    ALOGE("%s: No memory for undefined array", __func__);
+                    rc = NO_MEMORY;
+                } else {
+                    memcpy(values, data, count);
+                    m_Entries[m_nNumEntries].tag_entry.data._undefined = values;
+                }
+            }
+            break;
+        case EXIF_SLONG:
+            {
+                if (count > 1) {
+                    int32_t *values =
+                        (int32_t *)malloc(count * sizeof(int32_t));
+                    if (values == NULL) {
+                        ALOGE("%s: No memory for signed long array", __func__);
+                        rc = NO_MEMORY;
+                    } else {
+                        memcpy(values, data, count * sizeof(int32_t));
+                        m_Entries[m_nNumEntries].tag_entry.data._slongs =values;
+                    }
+                } else {
+                    m_Entries[m_nNumEntries].tag_entry.data._slong =
+                        *(int32_t *)data;
+                }
+            }
+            break;
+        case EXIF_SRATIONAL:
+            {
+                if (count > 1) {
+                    srat_t *values = (srat_t *)malloc(count * sizeof(srat_t));
+                    if (values == NULL) {
+                        ALOGE("%s: No memory for sign rational array",__func__);
+                        rc = NO_MEMORY;
+                    } else {
+                        memcpy(values, data, count * sizeof(srat_t));
+                        m_Entries[m_nNumEntries].tag_entry.data._srats = values;
+                    }
+                } else {
+                    m_Entries[m_nNumEntries].tag_entry.data._srat =
+                        *(srat_t *)data;
+                }
+            }
+            break;
+        default:
+            ALOGE("%s: Error, Unknown type",__func__);
+            break;
+    }
+
+    // Increase number of entries
+    m_nNumEntries++;
+    return rc;
+}
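
addEntry() deep-copies multi-valued tags onto the heap but stores single values inline in the union, which is why the destructor above only frees entries whose count is greater than one. A toy standalone model of that split (ShortEntry and setShorts() are illustrative):

    #include <stdlib.h>
    #include <string.h>

    struct ShortEntry {
        unsigned count;
        union {
            unsigned short value;     /* count == 1, stored inline, no free needed */
            unsigned short *values;   /* count > 1, heap-owned copy */
        };
    };

    static int setShorts(struct ShortEntry *e, const unsigned short *data, unsigned count)
    {
        e->count = count;
        if (count > 1) {
            e->values = (unsigned short *)malloc(count * sizeof(unsigned short));
            if (e->values == NULL)
                return -1;                        /* mirrors the NO_MEMORY path */
            memcpy(e->values, data, count * sizeof(unsigned short));
        } else {
            e->value = data[0];
        }
        return 0;
    }
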
+
+}; // namespace qcamera
diff --git a/camera/QCamera2/HAL3/QCamera3PostProc.h b/camera/QCamera2/HAL3/QCamera3PostProc.h
new file mode 100644
index 0000000..287e299
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3PostProc.h
@@ -0,0 +1,192 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCamera3_POSTPROC_H__
+#define __QCamera3_POSTPROC_H__
+
+extern "C" {
+#include <mm_camera_interface.h>
+#include <mm_jpeg_interface.h>
+}
+#include <hardware/camera3.h>
+//#include "QCamera3HWI.h"
+#include "QCameraQueue.h"
+#include "QCameraCmdThread.h"
+#include "QCamera3HALHeader.h"
+
+namespace qcamera {
+
+class QCamera3Exif;
+class QCamera3Channel;
+class QCamera3ProcessingChannel;
+class QCamera3PicChannel;
+class QCamera3ReprocessChannel;
+class QCamera3Stream;
+class QCamera3Memory;
+class QCamera3StreamMem;
+
+typedef struct {
+    camera3_stream_buffer_t src_frame;// source frame
+    mm_camera_buf_def_t metadata_buffer;
+    mm_camera_buf_def_t input_buffer;
+    reprocess_config_t reproc_config;
+    buffer_handle_t *output_buffer;
+    uint32_t frameNumber;
+} qcamera_fwk_input_pp_data_t;
+
+typedef struct {
+    uint32_t jobId;                  // job ID
+    uint32_t client_hdl;             // handle of jpeg client (obtained when opening jpeg)
+    mm_camera_super_buf_t *src_frame;// source frame (needs to be returned to the kernel when done)
+    mm_camera_super_buf_t *src_reproc_frame; // original source frame for reproc if not NULL
+    qcamera_fwk_input_pp_data_t *fwk_frame; // source framework buffer
+    qcamera_fwk_input_pp_data_t *fwk_src_buffer; // original framework source frame for reproc
+    QCamera3Exif *pJpegExifObj;
+    metadata_buffer_t *metadata;
+    mm_camera_super_buf_t *src_metadata;
+    jpeg_settings_t *jpeg_settings;
+} qcamera_hal3_jpeg_data_t;
+
+typedef struct {
+    uint32_t jobId;                  // job ID
+    mm_camera_super_buf_t *src_frame;// source frame (needs to be returned to the kernel when done)
+    qcamera_fwk_input_pp_data_t *fwk_src_frame;// source frame
+    metadata_buffer_t *metadata;
+    jpeg_settings_t *jpeg_settings;
+    mm_camera_super_buf_t *src_metadata;
+} qcamera_hal3_pp_data_t;
+
+typedef struct {
+    mm_camera_super_buf_t *input;
+    buffer_handle_t *output;
+    uint32_t frameNumber;
+} qcamera_hal3_pp_buffer_t;
+
+#define MAX_HAL3_EXIF_TABLE_ENTRIES 23
+class QCamera3Exif
+{
+public:
+    QCamera3Exif();
+    virtual ~QCamera3Exif();
+
+    int32_t addEntry(exif_tag_id_t tagid,
+                     exif_tag_type_t type,
+                     uint32_t count,
+                     void *data);
+    uint32_t getNumOfEntries() {return m_nNumEntries;};
+    QEXIF_INFO_DATA *getEntries() {return m_Entries;};
+
+private:
+    QEXIF_INFO_DATA m_Entries[MAX_HAL3_EXIF_TABLE_ENTRIES];  // exif tags for JPEG encoder
+    uint32_t  m_nNumEntries;                            // number of valid entries
+};
+
+class QCamera3PostProcessor
+{
+public:
+    QCamera3PostProcessor(QCamera3ProcessingChannel *ch_ctrl);
+    virtual ~QCamera3PostProcessor();
+
+    int32_t init(QCamera3StreamMem *mMemory,
+            uint32_t postprocess_mask);
+    int32_t initJpeg(jpeg_encode_callback_t jpeg_cb,
+            cam_dimension_t *m_max_pic_dim,
+            void *user_data);
+    int32_t deinit();
+    int32_t start(const reprocess_config_t &config);
+    int32_t stop();
+    int32_t processData(qcamera_fwk_input_pp_data_t *frame);
+    int32_t processData(mm_camera_super_buf_t *input,
+            buffer_handle_t *output, uint32_t frameNumber);
+    int32_t processData(mm_camera_super_buf_t *input);
+    int32_t processPPData(mm_camera_super_buf_t *frame);
+    int32_t processPPMetadata(mm_camera_super_buf_t *reproc_meta);
+    int32_t processJpegSettingData(jpeg_settings_t *jpeg_settings);
+    qcamera_hal3_pp_data_t *dequeuePPJob(uint32_t frameNumber);
+    qcamera_hal3_jpeg_data_t *findJpegJobByJobId(uint32_t jobId);
+    void releaseJpegJobData(qcamera_hal3_jpeg_data_t *job);
+    void releasePPJobData(qcamera_hal3_pp_data_t *job);
+    int32_t releaseOfflineBuffers();
+
+private:
+    int32_t sendEvtNotify(int32_t msg_type, int32_t ext1, int32_t ext2);
+    mm_jpeg_color_format getColorfmtFromImgFmt(cam_format_t img_fmt);
+    mm_jpeg_format_t getJpegImgTypeFromImgFmt(cam_format_t img_fmt);
+    int32_t getJpegEncodeConfig(mm_jpeg_encode_params_t& encode_parm,
+                                  QCamera3Stream *main_stream,
+                                  jpeg_settings_t *jpeg_settings);
+    int32_t getFWKJpegEncodeConfig(mm_jpeg_encode_params_t& encode_parm,
+            qcamera_fwk_input_pp_data_t *frame,
+            jpeg_settings_t *jpeg_settings);
+    QCamera3Exif * getExifData(metadata_buffer_t *metadata,
+            jpeg_settings_t *jpeg_settings);
+    int32_t encodeData(qcamera_hal3_jpeg_data_t *jpeg_job_data,
+                       uint8_t &needNewSess);
+    int32_t encodeFWKData(qcamera_hal3_jpeg_data_t *jpeg_job_data,
+            uint8_t &needNewSess);
+    void releaseSuperBuf(mm_camera_super_buf_t *super_buf);
+    static void releaseNotifyData(void *user_data, void *cookie);
+    int32_t processRawImageImpl(mm_camera_super_buf_t *recvd_frame);
+
+    static void releaseJpegData(void *data, void *user_data);
+    static void releasePPInputData(void *data, void *user_data);
+    static void releaseMetadata(void *data, void *user_data);
+    static void releaseOngoingPPData(void *data, void *user_data);
+
+    static void *dataProcessRoutine(void *data);
+
+private:
+    QCamera3ProcessingChannel  *m_parent;
+    jpeg_encode_callback_t     mJpegCB;
+    void *                     mJpegUserData;
+    mm_jpeg_ops_t              mJpegHandle;
+    uint32_t                   mJpegClientHandle;
+    uint32_t                   mJpegSessionId;
+    uint32_t                   mPostProcMask;
+
+    uint32_t                   m_bThumbnailNeeded;
+    QCamera3StreamMem          *mOutputMem;
+    QCamera3ReprocessChannel *  m_pReprocChannel;
+
+    QCameraQueue m_inputPPQ;            // input queue for postproc
+    QCameraQueue m_inputFWKPPQ;         // framework input queue for postproc
+    QCameraQueue m_ongoingPPQ;          // ongoing postproc queue
+    QCameraQueue m_inputJpegQ;          // input jpeg job queue
+    QCameraQueue m_ongoingJpegQ;        // ongoing jpeg job queue
+    QCameraQueue m_inputRawQ;           // input raw job queue
+    QCameraQueue m_inputMetaQ;          // input meta queue
+    QCameraQueue m_jpegSettingsQ;       // input jpeg setting queue
+    QCameraCmdThread m_dataProcTh;      // thread for data processing
+
+    pthread_mutex_t mReprocJobLock;
+};
+
+}; // namespace qcamera
+
+#endif /* __QCamera3_POSTPROC_H__ */
diff --git a/camera/QCamera2/HAL3/QCamera3Stream.cpp b/camera/QCamera2/HAL3/QCamera3Stream.cpp
new file mode 100644
index 0000000..efd00e5
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3Stream.cpp
@@ -0,0 +1,1485 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#define LOG_TAG "QCamera3Stream"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Errors.h>
+#include "QCamera3HWI.h"
+#include "QCamera3Stream.h"
+#include "QCamera3Channel.h"
+
+using namespace android;
+
+namespace qcamera {
+#define MAX_BATCH_SIZE   32
+
+/*===========================================================================
+ * FUNCTION   : get_bufs
+ *
+ * DESCRIPTION: static function entry to allocate stream buffers
+ *
+ * PARAMETERS :
+ *   @offset     : offset info of stream buffers
+ *   @num_bufs   : number of buffers allocated
+ *   @initial_reg_flag: flag to indicate if buffer needs to be registered
+ *                      at kernel initially
+ *   @bufs       : output of allocated buffers
+ *   @ops_tbl    : ptr to buf mapping/unmapping ops
+ *   @user_data  : user data ptr of ops_tbl
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::get_bufs(
+                     cam_frame_len_offset_t *offset,
+                     uint8_t *num_bufs,
+                     uint8_t **initial_reg_flag,
+                     mm_camera_buf_def_t **bufs,
+                     mm_camera_map_unmap_ops_tbl_t *ops_tbl,
+                     void *user_data)
+{
+    int32_t rc = NO_ERROR;
+    QCamera3Stream *stream = reinterpret_cast<QCamera3Stream *>(user_data);
+    if (!stream) {
+        ALOGE("%s: getBufs invalid stream pointer", __func__);
+        return NO_MEMORY;
+    }
+    rc = stream->getBufs(offset, num_bufs, initial_reg_flag, bufs, ops_tbl);
+    if (NO_ERROR != rc) {
+        ALOGE("%s: stream->getBufs failed", __func__);
+        return NO_MEMORY;
+    }
+    if (stream->mBatchSize) {
+        //Allocate batch buffers if mBatchSize is non-zero. All the output
+        //arguments correspond to batch containers and not image buffers
+        rc = stream->getBatchBufs(num_bufs, initial_reg_flag,
+                bufs, ops_tbl);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : put_bufs
+ *
+ * DESCRIPTION: static function entry to deallocate stream buffers
+ *
+ * PARAMETERS :
+ *   @ops_tbl    : ptr to buf mapping/unmapping ops
+ *   @user_data  : user data ptr of ops_tbl
+ *
+ * RETURN     : int32_t type of status
+ *              non-zero failure code
+ *              none-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::put_bufs(
+                     mm_camera_map_unmap_ops_tbl_t *ops_tbl,
+                     void *user_data)
+{
+    int32_t rc = NO_ERROR;
+    QCamera3Stream *stream = reinterpret_cast<QCamera3Stream *>(user_data);
+    if (!stream) {
+        ALOGE("putBufs invalid stream pointer");
+        return NO_MEMORY;
+    }
+
+    if (stream->mBatchSize) {
+        rc = stream->putBatchBufs(ops_tbl);
+        if (NO_ERROR != rc) {
+            ALOGE("%s: stream->putBatchBufs failed", __func__);
+        }
+    }
+    rc = stream->putBufs(ops_tbl);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : invalidate_buf
+ *
+ * DESCRIPTION: static function entry to invalidate a specific stream buffer
+ *
+ * PARAMETERS :
+ *   @index      : index of the stream buffer to invalidate
+ *   @user_data  : user data ptr of ops_tbl
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::invalidate_buf(uint32_t index, void *user_data)
+{
+    int32_t rc = NO_ERROR;
+
+    QCamera3Stream *stream = reinterpret_cast<QCamera3Stream *>(user_data);
+    if (!stream) {
+        ALOGE("invalid stream pointer");
+        return NO_MEMORY;
+    }
+    if (stream->mBatchSize) {
+        int32_t retVal = NO_ERROR;
+        for (size_t i = 0;
+                i < stream->mBatchBufDefs[index].user_buf.bufs_used; i++) {
+            uint32_t buf_idx = stream->mBatchBufDefs[index].user_buf.buf_idx[i];
+            retVal = stream->invalidateBuf(buf_idx);
+            if (NO_ERROR != retVal) {
+                ALOGE("%s: invalidateBuf failed for buf_idx: %d err: %d",
+                        __func__, buf_idx, retVal);
+            }
+            rc |= retVal;
+        }
+    } else {
+        rc = stream->invalidateBuf(index);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : clean_invalidate_buf
+ *
+ * DESCRIPTION: static function entry to clean and invalidate a specific stream buffer
+ *
+ * PARAMETERS :
+ *   @index      : index of the stream buffer to invalidate
+ *   @user_data  : user data ptr of ops_tbl
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::clean_invalidate_buf(uint32_t index, void *user_data)
+{
+    int32_t rc = NO_ERROR;
+
+    QCamera3Stream *stream = reinterpret_cast<QCamera3Stream *>(user_data);
+    if (!stream) {
+        ALOGE("invalid stream pointer");
+        return NO_MEMORY;
+    }
+    if (stream->mBatchSize) {
+        int32_t retVal = NO_ERROR;
+        for (size_t i = 0;
+                i < stream->mBatchBufDefs[index].user_buf.bufs_used; i++) {
+            uint32_t buf_idx = stream->mBatchBufDefs[index].user_buf.buf_idx[i];
+            retVal = stream->cleanInvalidateBuf(buf_idx);
+            if (NO_ERROR != retVal) {
+                ALOGE("%s: cleanInvalidateBuf failed for buf_idx: %d err: %d",
+                        __func__, buf_idx, retVal);
+            }
+            rc |= retVal;
+        }
+    } else {
+        rc = stream->cleanInvalidateBuf(index);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : QCamera3Stream
+ *
+ * DESCRIPTION: constructor of QCamera3Stream
+ *
+ * PARAMETERS :
+ *   @camHandle  : camera handle
+ *   @chId       : channel handle
+ *   @camOps     : ptr to camera ops table
+ *   @paddingInfo: ptr to padding info
+ *   @channel    : ptr to the QCamera3Channel that owns this stream
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCamera3Stream::QCamera3Stream(uint32_t camHandle,
+                             uint32_t chId,
+                             mm_camera_ops_t *camOps,
+                             cam_padding_info_t *paddingInfo,
+                             QCamera3Channel *channel) :
+        mCamHandle(camHandle),
+        mChannelHandle(chId),
+        mHandle(0),
+        mCamOps(camOps),
+        mStreamInfo(NULL),
+        mMemOps(NULL),
+        mNumBufs(0),
+        mDataCB(NULL),
+        mUserData(NULL),
+        mDataQ(releaseFrameData, this),
+        mStreamInfoBuf(NULL),
+        mStreamBufs(NULL),
+        mBufDefs(NULL),
+        mChannel(channel),
+        mBatchSize(0),
+        mNumBatchBufs(0),
+        mStreamBatchBufs(NULL),
+        mBatchBufDefs(NULL),
+        mCurrentBatchBufDef(NULL),
+        mBufsStaged(0),
+        mFreeBatchBufQ(NULL, this)
+{
+    mMemVtbl.user_data = this;
+    mMemVtbl.get_bufs = get_bufs;
+    mMemVtbl.put_bufs = put_bufs;
+    mMemVtbl.invalidate_buf = invalidate_buf;
+    mMemVtbl.clean_invalidate_buf = clean_invalidate_buf;
+    memset(&mFrameLenOffset, 0, sizeof(mFrameLenOffset));
+    memcpy(&mPaddingInfo, paddingInfo, sizeof(cam_padding_info_t));
+}
+
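+// NOTE: the four static trampolines registered in mMemVtbl above are invoked
+// by mm-camera-interface with user_data pointing back at this stream. A rough
+// sketch of the dispatch (illustrative only; the real call sites live inside
+// mm-camera-interface):
+//
+//     mm_camera_stream_mem_vtbl_t *vtbl = &stream_config.mem_vtbl;
+//     vtbl->get_bufs(&offset, &num_bufs, &reg_flags, &bufs, &ops_tbl,
+//             vtbl->user_data);   // lands in QCamera3Stream::get_bufs()
+//
+// which then forwards to the corresponding member function (getBufs, putBufs,
+// invalidateBuf or cleanInvalidateBuf) of this instance.
+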
+/*===========================================================================
+ * FUNCTION   : ~QCamera3Stream
+ *
+ * DESCRIPTION: destructor of QCamera3Stream
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCamera3Stream::~QCamera3Stream()
+{
+    if (mStreamInfoBuf != NULL) {
+        int rc = mCamOps->unmap_stream_buf(mCamHandle,
+                    mChannelHandle, mHandle, CAM_MAPPING_BUF_TYPE_STREAM_INFO, 0, -1);
+        if (rc < 0) {
+            ALOGE("Failed to un-map stream info buffer");
+        }
+        mStreamInfoBuf->deallocate();
+        delete mStreamInfoBuf;
+        mStreamInfoBuf = NULL;
+    }
+    // delete stream
+    if (mHandle > 0) {
+        mCamOps->delete_stream(mCamHandle, mChannelHandle, mHandle);
+        mHandle = 0;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : init
+ *
+ * DESCRIPTION: initialize stream obj
+ *
+ * PARAMETERS :
+ *   @streamType     : stream type
+ *   @streamFormat   : stream format
+ *   @streamDim      : stream dimension
+ *   @streamRotation : rotation applied on the stream
+ *   @reprocess_config: reprocess stream input configuration
+ *   @minNumBuffers  : minimal buffer count for particular stream type
+ *   @postprocess_mask: PP mask
+ *   @is_type  : Image stabilization type, cam_is_type_t
+ *   @batchSize  : Number of image buffers in a batch.
+ *                 0: No batch. N: container with N image buffers
+ *   @stream_cb      : callback handle
+ *   @userdata       : user data
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::init(cam_stream_type_t streamType,
+                            cam_format_t streamFormat,
+                            cam_dimension_t streamDim,
+                            cam_rotation_t streamRotation,
+                            cam_stream_reproc_config_t* reprocess_config,
+                            uint8_t minNumBuffers,
+                            uint32_t postprocess_mask,
+                            cam_is_type_t is_type,
+                            uint32_t batchSize,
+                            hal3_stream_cb_routine stream_cb,
+                            void *userdata)
+{
+    int32_t rc = OK;
+    ssize_t bufSize = BAD_INDEX;
+    mm_camera_stream_config_t stream_config;
+    CDBG("%s: batch size is %d", __func__, batchSize);
+
+    mHandle = mCamOps->add_stream(mCamHandle, mChannelHandle);
+    if (!mHandle) {
+        ALOGE("add_stream failed");
+        rc = UNKNOWN_ERROR;
+        goto done;
+    }
+
+    // allocate and map stream info memory
+    mStreamInfoBuf = new QCamera3HeapMemory(1);
+    if (mStreamInfoBuf == NULL) {
+        ALOGE("%s: no memory for stream info buf obj", __func__);
+        rc = -ENOMEM;
+        goto err1;
+    }
+    rc = mStreamInfoBuf->allocate(sizeof(cam_stream_info_t));
+    if (rc < 0) {
+        ALOGE("%s: no memory for stream info", __func__);
+        rc = -ENOMEM;
+        goto err2;
+    }
+
+    mStreamInfo =
+        reinterpret_cast<cam_stream_info_t *>(mStreamInfoBuf->getPtr(0));
+    memset(mStreamInfo, 0, sizeof(cam_stream_info_t));
+    mStreamInfo->stream_type = streamType;
+    mStreamInfo->fmt = streamFormat;
+    mStreamInfo->dim = streamDim;
+    mStreamInfo->num_bufs = minNumBuffers;
+    mStreamInfo->pp_config.feature_mask = postprocess_mask;
+    mStreamInfo->is_type = is_type;
+    mStreamInfo->pp_config.rotation = streamRotation;
+    ALOGI("%s: stream_type is %d, feature_mask is %d", __func__,
+            mStreamInfo->stream_type, mStreamInfo->pp_config.feature_mask);
+
+    bufSize = mStreamInfoBuf->getSize(0);
+    if (BAD_INDEX != bufSize) {
+        rc = mCamOps->map_stream_buf(mCamHandle,
+                mChannelHandle, mHandle, CAM_MAPPING_BUF_TYPE_STREAM_INFO,
+                0, -1, mStreamInfoBuf->getFd(0), (size_t)bufSize);
+        if (rc < 0) {
+            ALOGE("Failed to map stream info buffer");
+            goto err3;
+        }
+    } else {
+        ALOGE("Failed to retrieve buffer size (bad index)");
+        goto err3;
+    }
+
+    mNumBufs = minNumBuffers;
+    if (reprocess_config != NULL) {
+        mStreamInfo->reprocess_config = *reprocess_config;
+        mStreamInfo->streaming_mode = CAM_STREAMING_MODE_BURST;
+        //mStreamInfo->num_of_burst = reprocess_config->offline.num_of_bufs;
+        mStreamInfo->num_of_burst = 1;
+    } else if (batchSize) {
+        if (batchSize > MAX_BATCH_SIZE) {
+            ALOGE("%s: batchSize: %d exceeds maximum supported batch size %d",
+                    __func__, batchSize, MAX_BATCH_SIZE);
+            rc = BAD_VALUE;
+            goto err4;
+        } else {
+            mNumBatchBufs = MAX_INFLIGHT_HFR_REQUESTS / batchSize;
+            mStreamInfo->streaming_mode = CAM_STREAMING_MODE_BATCH;
+            mStreamInfo->user_buf_info.frame_buf_cnt = batchSize;
+            mStreamInfo->user_buf_info.size =
+                    (uint32_t)(sizeof(msm_camera_user_buf_cont_t));
+            mStreamInfo->num_bufs = mNumBatchBufs;
+            //Frame interval is irrelevant since time stamp calculation is not
+            //required from the mCamOps
+            mStreamInfo->user_buf_info.frameInterval = 0;
+            CDBG("%s: batch size is %d", __func__, batchSize);
+        }
+    } else {
+        mStreamInfo->streaming_mode = CAM_STREAMING_MODE_CONTINUOUS;
+    }
+
+    // Configure the stream
+    stream_config.stream_info = mStreamInfo;
+    stream_config.mem_vtbl = mMemVtbl;
+    stream_config.padding_info = mPaddingInfo;
+    stream_config.userdata = this;
+    stream_config.stream_cb = dataNotifyCB;
+
+    rc = mCamOps->config_stream(mCamHandle,
+            mChannelHandle, mHandle, &stream_config);
+    if (rc < 0) {
+        ALOGE("Failed to config stream, rc = %d", rc);
+        goto err4;
+    }
+
+    mDataCB = stream_cb;
+    mUserData = userdata;
+    mBatchSize = batchSize;
+    return 0;
+
+err4:
+    mCamOps->unmap_stream_buf(mCamHandle,
+            mChannelHandle, mHandle, CAM_MAPPING_BUF_TYPE_STREAM_INFO, 0, -1);
+err3:
+    mStreamInfoBuf->deallocate();
+err2:
+    delete mStreamInfoBuf;
+    mStreamInfoBuf = NULL;
+    mStreamInfo = NULL;
+err1:
+    mCamOps->delete_stream(mCamHandle, mChannelHandle, mHandle);
+    mHandle = 0;
+    mNumBufs = 0;
+done:
+    return rc;
+}
+
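+// Minimal lifecycle sketch as driven by the owning channel (illustrative
+// only; variable names and enum values below are assumptions, the actual
+// sequence lives in QCamera3Channel.cpp):
+//
+//     QCamera3Stream *stream = new QCamera3Stream(camHandle, chHandle,
+//             camOps, &paddingInfo, channel);
+//     stream->init(streamType, streamFormat, streamDim, ROTATE_0,
+//             NULL /*reprocess_config*/, minNumBuffers, ppMask,
+//             IS_TYPE_NONE, 0 /*batchSize*/, streamCbRoutine, userdata);
+//     stream->start();             // spawns the dataProcRoutine thread
+//     ...                          // frames delivered through dataNotifyCB
+//     stream->stop();              // exits the thread and flushes mDataQ
+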
+/*===========================================================================
+ * FUNCTION   : start
+ *
+ * DESCRIPTION: start stream. Will start main stream thread to handle stream
+ *              related ops.
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::start()
+{
+    int32_t rc = 0;
+
+    mDataQ.init();
+    if (mBatchSize)
+        mFreeBatchBufQ.init();
+    rc = mProcTh.launch(dataProcRoutine, this);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : stop
+ *
+ * DESCRIPTION: stop stream. Will stop main stream thread
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::stop()
+{
+    int32_t rc = 0;
+    rc = mProcTh.exit();
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : processDataNotify
+ *
+ * DESCRIPTION: process stream data notify
+ *
+ * PARAMETERS :
+ *   @frame   : stream frame received
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::processDataNotify(mm_camera_super_buf_t *frame)
+{
+    CDBG("%s: E\n", __func__);
+    int32_t rc;
+    if (mDataQ.enqueue((void *)frame)) {
+        rc = mProcTh.sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, FALSE, FALSE);
+    } else {
+        ALOGD("%s: Stream thread is not active, no ops here", __func__);
+        bufDone(frame->bufs[0]->buf_idx);
+        free(frame);
+        rc = NO_ERROR;
+    }
+    CDBG("%s: X\n", __func__);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : dataNotifyCB
+ *
+ * DESCRIPTION: callback for data notify. This function is registered with
+ *              mm-camera-interface to handle data notify
+ *
+ * PARAMETERS :
+ *   @recvd_frame   : stream frame received
+ *   @userdata      : user data ptr
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera3Stream::dataNotifyCB(mm_camera_super_buf_t *recvd_frame,
+                                 void *userdata)
+{
+    CDBG("%s: E\n", __func__);
+    QCamera3Stream* stream = (QCamera3Stream *)userdata;
+    if (stream == NULL ||
+        recvd_frame == NULL ||
+        recvd_frame->bufs[0] == NULL ||
+        recvd_frame->bufs[0]->stream_id != stream->getMyHandle()) {
+        ALOGE("%s: Not a valid stream to handle buf", __func__);
+        return;
+    }
+
+    mm_camera_super_buf_t *frame =
+        (mm_camera_super_buf_t *)malloc(sizeof(mm_camera_super_buf_t));
+    if (frame == NULL) {
+        ALOGE("%s: No mem for mm_camera_super_buf_t", __func__);
+        stream->bufDone(recvd_frame->bufs[0]->buf_idx);
+        return;
+    }
+    *frame = *recvd_frame;
+    stream->processDataNotify(frame);
+    return;
+}
+
+/*===========================================================================
+ * FUNCTION   : dataProcRoutine
+ *
+ * DESCRIPTION: function to process data in the main stream thread
+ *
+ * PARAMETERS :
+ *   @data    : user data ptr
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void *QCamera3Stream::dataProcRoutine(void *data)
+{
+    int running = 1;
+    int ret;
+    QCamera3Stream *pme = (QCamera3Stream *)data;
+    QCameraCmdThread *cmdThread = &pme->mProcTh;
+    cmdThread->setName("cam_stream_proc");
+
+    CDBG("%s: E", __func__);
+    do {
+        do {
+            ret = cam_sem_wait(&cmdThread->cmd_sem);
+            if (ret != 0 && errno != EINVAL) {
+                ALOGE("%s: cam_sem_wait error (%s)",
+                      __func__, strerror(errno));
+                return NULL;
+            }
+        } while (ret != 0);
+
+        // we got notified about new cmd avail in cmd queue
+        camera_cmd_type_t cmd = cmdThread->getCmd();
+        switch (cmd) {
+        case CAMERA_CMD_TYPE_DO_NEXT_JOB:
+            {
+                CDBG("%s: Do next job", __func__);
+                mm_camera_super_buf_t *frame =
+                    (mm_camera_super_buf_t *)pme->mDataQ.dequeue();
+                if (NULL != frame) {
+                    if (UNLIKELY(frame->bufs[0]->buf_type ==
+                            CAM_STREAM_BUF_TYPE_USERPTR)) {
+                        pme->handleBatchBuffer(frame);
+                    } else if (pme->mDataCB != NULL) {
+                        pme->mDataCB(frame, pme, pme->mUserData);
+                    } else {
+                        // no data cb routine, return buf here
+                        pme->bufDone(frame->bufs[0]->buf_idx);
+                    }
+                }
+            }
+            break;
+        case CAMERA_CMD_TYPE_EXIT:
+            CDBG_HIGH("%s: Exit", __func__);
+            /* flush data buf queue */
+            pme->mDataQ.flush();
+            pme->flushFreeBatchBufQ();
+            running = 0;
+            break;
+        default:
+            break;
+        }
+    } while (running);
+    CDBG("%s: X", __func__);
+    return NULL;
+}
+
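+// Data path summary: dataNotifyCB() runs in mm-camera-interface context,
+// shallow-copies the super buffer and enqueues it on mDataQ, then signals
+// mProcTh. dataProcRoutine() drains mDataQ on its own thread and either
+// unpacks batch containers (handleBatchBuffer) or hands the frame to the
+// registered hal3_stream_cb_routine, falling back to bufDone() when no
+// callback is set.
+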
+/*===========================================================================
+ * FUNCTION   : bufDone
+ *
+ * DESCRIPTION: return stream buffer to kernel
+ *
+ * PARAMETERS :
+ *   @index   : index of buffer to be returned
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::bufDone(uint32_t index)
+{
+    int32_t rc = NO_ERROR;
+    Mutex::Autolock lock(mLock);
+
+    if ((index >= mNumBufs) || (mBufDefs == NULL)) {
+        ALOGE("%s: Invalid buffer index: %d, mNumBufs: %d", __func__,
+                index, mNumBufs);
+        return BAD_INDEX;
+    }
+
+    if (NULL == mBufDefs[index].mem_info) {
+        if (NULL == mMemOps) {
+            ALOGE("%s: Camera operations not initialized", __func__);
+            return NO_INIT;
+        }
+
+        ssize_t bufSize = mStreamBufs->getSize(index);
+
+        if (BAD_INDEX != bufSize) {
+            CDBG("%s: Map streamBufIdx: %d", __func__, index);
+            rc = mMemOps->map_ops(index, -1, mStreamBufs->getFd(index),
+                    (size_t)bufSize, CAM_MAPPING_BUF_TYPE_STREAM_BUF, mMemOps->userdata);
+            if (rc < 0) {
+                ALOGE("%s: Failed to map camera buffer %d", __func__, index);
+                return rc;
+            }
+
+            rc = mStreamBufs->getBufDef(mFrameLenOffset, mBufDefs[index], index);
+            if (NO_ERROR != rc) {
+                ALOGE("%s: Couldn't find camera buffer definition", __func__);
+                mMemOps->unmap_ops(index, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF, mMemOps->userdata);
+                return rc;
+            }
+        } else {
+            ALOGE("Failed to retrieve buffer size (bad index)");
+            return INVALID_OPERATION;
+        }
+    }
+
+    if (UNLIKELY(mBatchSize)) {
+        rc = aggregateBufToBatch(mBufDefs[index]);
+    } else {
+        rc = mCamOps->qbuf(mCamHandle, mChannelHandle, &mBufDefs[index]);
+        if (rc < 0) {
+            return FAILED_TRANSACTION;
+        }
+    }
+
+    return rc;
+}
+
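+// NOTE: buffer registration is deferred. When bufDone() finds mem_info == NULL
+// for the given index, it maps the buffer to the backend (map_ops) and fills
+// in its mm_camera_buf_def_t on the spot, so buffers that were not available
+// at getBufs() time can still be registered on the fly before being queued.
+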
+/*===========================================================================
+ * FUNCTION   : bufRelease
+ *
+ * DESCRIPTION: release all resources associated with this buffer
+ *
+ * PARAMETERS :
+ *   @index   : index of buffer to be released
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::bufRelease(int32_t index)
+{
+    int32_t rc = NO_ERROR;
+    Mutex::Autolock lock(mLock);
+
+    if ((index < 0) || (index >= mNumBufs) || (mBufDefs == NULL)) {
+        return BAD_INDEX;
+    }
+
+    if (NULL != mBufDefs[index].mem_info) {
+        if (NULL == mMemOps) {
+            ALOGE("%s: Camera operations not initialized", __func__);
+            return NO_INIT;
+        }
+
+        rc = mMemOps->unmap_ops(index, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF, mMemOps->userdata);
+        if (rc < 0) {
+            ALOGE("%s: Failed to un-map camera buffer %d", __func__, index);
+            return rc;
+        }
+
+        mBufDefs[index].mem_info = NULL;
+    } else {
+        ALOGE("%s: Buffer at index %d not registered", __func__, index);
+        return BAD_INDEX;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : getBufs
+ *
+ * DESCRIPTION: allocate stream buffers
+ *
+ * PARAMETERS :
+ *   @offset     : offset info of stream buffers
+ *   @num_bufs   : number of buffers allocated
+ *   @initial_reg_flag: flag to indicate if buffer needs to be registered
+ *                      at kernel initially
+ *   @bufs       : output of allocated buffers
+ *   @ops_tbl    : ptr to buf mapping/unmapping ops
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::getBufs(cam_frame_len_offset_t *offset,
+                     uint8_t *num_bufs,
+                     uint8_t **initial_reg_flag,
+                     mm_camera_buf_def_t **bufs,
+                     mm_camera_map_unmap_ops_tbl_t *ops_tbl)
+{
+    int rc = NO_ERROR;
+    uint8_t *regFlags;
+    Mutex::Autolock lock(mLock);
+
+    if (!ops_tbl) {
+        ALOGE("%s: ops_tbl is NULL", __func__);
+        return INVALID_OPERATION;
+    }
+
+    mFrameLenOffset = *offset;
+    mMemOps = ops_tbl;
+
+    mStreamBufs = mChannel->getStreamBufs(mFrameLenOffset.frame_len);
+    if (!mStreamBufs) {
+        ALOGE("%s: Failed to allocate stream buffers", __func__);
+        return NO_MEMORY;
+    }
+
+    for (uint32_t i = 0; i < mNumBufs; i++) {
+        if (mStreamBufs->valid(i)) {
+            ssize_t bufSize = mStreamBufs->getSize(i);
+            if (BAD_INDEX != bufSize) {
+                rc = ops_tbl->map_ops(i, -1, mStreamBufs->getFd(i),
+                        (size_t)bufSize, CAM_MAPPING_BUF_TYPE_STREAM_BUF,
+                        ops_tbl->userdata);
+                if (rc < 0) {
+                    ALOGE("%s: map_stream_buf failed: %d", __func__, rc);
+                    for (uint32_t j = 0; j < i; j++) {
+                        if (mStreamBufs->valid(j)) {
+                            ops_tbl->unmap_ops(j, -1,
+                                    CAM_MAPPING_BUF_TYPE_STREAM_BUF,
+                                    ops_tbl->userdata);
+                        }
+                    }
+                    return INVALID_OPERATION;
+                }
+            } else {
+                ALOGE("Failed to retrieve buffer size (bad index)");
+                return INVALID_OPERATION;
+            }
+        }
+    }
+
+    //regFlags array is allocated by us, but consumed and freed by mm-camera-interface
+    regFlags = (uint8_t *)malloc(sizeof(uint8_t) * mNumBufs);
+    if (!regFlags) {
+        ALOGE("%s: Out of memory", __func__);
+        for (uint32_t i = 0; i < mNumBufs; i++) {
+            if (mStreamBufs->valid(i)) {
+                ops_tbl->unmap_ops(i, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF,
+                        ops_tbl->userdata);
+            }
+        }
+        return NO_MEMORY;
+    }
+    memset(regFlags, 0, sizeof(uint8_t) * mNumBufs);
+
+    mBufDefs = (mm_camera_buf_def_t *)malloc(mNumBufs * sizeof(mm_camera_buf_def_t));
+    if (mBufDefs == NULL) {
+        ALOGE("%s: Failed to allocate mm_camera_buf_def_t %d", __func__, rc);
+        for (uint32_t i = 0; i < mNumBufs; i++) {
+            if (mStreamBufs->valid(i)) {
+                ops_tbl->unmap_ops(i, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF,
+                        ops_tbl->userdata);
+            }
+        }
+        free(regFlags);
+        regFlags = NULL;
+        return INVALID_OPERATION;
+    }
+    memset(mBufDefs, 0, mNumBufs * sizeof(mm_camera_buf_def_t));
+    for (uint32_t i = 0; i < mNumBufs; i++) {
+        if (mStreamBufs->valid(i)) {
+            mStreamBufs->getBufDef(mFrameLenOffset, mBufDefs[i], i);
+        }
+    }
+
+    rc = mStreamBufs->getRegFlags(regFlags);
+    if (rc < 0) {
+        ALOGE("%s: getRegFlags failed %d", __func__, rc);
+        for (uint32_t i = 0; i < mNumBufs; i++) {
+            if (mStreamBufs->valid(i)) {
+                ops_tbl->unmap_ops(i, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF,
+                        ops_tbl->userdata);
+            }
+        }
+        free(mBufDefs);
+        mBufDefs = NULL;
+        free(regFlags);
+        regFlags = NULL;
+        return INVALID_OPERATION;
+    }
+
+    *num_bufs = mNumBufs;
+    *initial_reg_flag = regFlags;
+    *bufs = mBufDefs;
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : putBufs
+ *
+ * DESCRIPTION: deallocate stream buffers
+ *
+ * PARAMETERS :
+ *   @ops_tbl    : ptr to buf mapping/unmapping ops
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::putBufs(mm_camera_map_unmap_ops_tbl_t *ops_tbl)
+{
+    int rc = NO_ERROR;
+    Mutex::Autolock lock(mLock);
+
+    for (uint32_t i = 0; i < mNumBufs; i++) {
+        if (mStreamBufs->valid(i) && NULL != mBufDefs[i].mem_info) {
+            rc = ops_tbl->unmap_ops(i, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF, ops_tbl->userdata);
+            if (rc < 0) {
+                ALOGE("%s: un-map stream buf failed: %d", __func__, rc);
+            }
+        }
+    }
+    mBufDefs = NULL; // mBufDefs just keeps a ptr to the buffers
+                     // mm-camera-interface owns the buffers, so no need to free
+    memset(&mFrameLenOffset, 0, sizeof(mFrameLenOffset));
+    mChannel->putStreamBufs();
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : invalidateBuf
+ *
+ * DESCRIPTION: invalidate a specific stream buffer
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer to invalidate
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::invalidateBuf(uint32_t index)
+{
+    return mStreamBufs->invalidateCache(index);
+}
+
+/*===========================================================================
+ * FUNCTION   : cleanInvalidateBuf
+ *
+ * DESCRIPTION: clean and invalidate a specific stream buffer
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer to invalidate
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::cleanInvalidateBuf(uint32_t index)
+{
+    return mStreamBufs->cleanInvalidateCache(index);
+}
+
+/*===========================================================================
+ * FUNCTION   : getFrameOffset
+ *
+ * DESCRIPTION: query stream buffer frame offset info
+ *
+ * PARAMETERS :
+ *   @offset  : reference to struct to store the queried frame offset info
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::getFrameOffset(cam_frame_len_offset_t &offset)
+{
+    offset = mFrameLenOffset;
+    return 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : getFrameDimension
+ *
+ * DESCRIPTION: query stream frame dimension info
+ *
+ * PARAMETERS :
+ *   @dim     : reference to struct to store the queried frame dimension
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::getFrameDimension(cam_dimension_t &dim)
+{
+    if (mStreamInfo != NULL) {
+        dim = mStreamInfo->dim;
+        return 0;
+    }
+    return -1;
+}
+
+/*===========================================================================
+ * FUNCTION   : getFormat
+ *
+ * DESCRIPTION: query stream format
+ *
+ * PARAMETERS :
+ *   @fmt     : reference to stream format
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::getFormat(cam_format_t &fmt)
+{
+    if (mStreamInfo != NULL) {
+        fmt = mStreamInfo->fmt;
+        return 0;
+    }
+    return -1;
+}
+
+/*===========================================================================
+ * FUNCTION   : getMyServerID
+ *
+ * DESCRIPTION: query server stream ID
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : stream ID from server
+ *==========================================================================*/
+uint32_t QCamera3Stream::getMyServerID() {
+    if (mStreamInfo != NULL) {
+        return mStreamInfo->stream_svr_id;
+    } else {
+        return 0;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getMyType
+ *
+ * DESCRIPTION: query stream type
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : type of stream
+ *==========================================================================*/
+cam_stream_type_t QCamera3Stream::getMyType() const
+{
+    if (mStreamInfo != NULL) {
+        return mStreamInfo->stream_type;
+    } else {
+        return CAM_STREAM_TYPE_MAX;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : mapBuf
+ *
+ * DESCRIPTION: map stream related buffer to backend server
+ *
+ * PARAMETERS :
+ *   @buf_type : mapping type of buffer
+ *   @buf_idx  : index of buffer
+ *   @plane_idx: plane index
+ *   @fd       : fd of the buffer
+ *   @size     : length of the buffer
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::mapBuf(uint8_t buf_type, uint32_t buf_idx,
+        int32_t plane_idx, int fd, size_t size)
+{
+    return mCamOps->map_stream_buf(mCamHandle, mChannelHandle,
+                                   mHandle, buf_type,
+                                   buf_idx, plane_idx,
+                                   fd, size);
+
+}
+
+/*===========================================================================
+ * FUNCTION   : unmapBuf
+ *
+ * DESCRIPTION: unmap stream related buffer from backend server
+ *
+ * PARAMETERS :
+ *   @buf_type : mapping type of buffer
+ *   @buf_idx  : index of buffer
+ *   @plane_idx: plane index
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::unmapBuf(uint8_t buf_type, uint32_t buf_idx, int32_t plane_idx)
+{
+    return mCamOps->unmap_stream_buf(mCamHandle, mChannelHandle,
+                                     mHandle, buf_type,
+                                     buf_idx, plane_idx);
+}
+
+/*===========================================================================
+ * FUNCTION   : setParameter
+ *
+ * DESCRIPTION: set stream based parameters
+ *
+ * PARAMETERS :
+ *   @param   : ptr to parameters to be set
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::setParameter(cam_stream_parm_buffer_t &param)
+{
+    int32_t rc = NO_ERROR;
+    mStreamInfo->parm_buf = param;
+    rc = mCamOps->set_stream_parms(mCamHandle,
+                                   mChannelHandle,
+                                   mHandle,
+                                   &mStreamInfo->parm_buf);
+    if (rc == NO_ERROR) {
+        param = mStreamInfo->parm_buf;
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseFrameData
+ *
+ * DESCRIPTION: callback function to release frame data node
+ *
+ * PARAMETERS :
+ *   @data      : ptr to the frame data node (mm_camera_super_buf_t)
+ *   @user_data : user data ptr (QCamera3Stream)
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCamera3Stream::releaseFrameData(void *data, void *user_data)
+{
+    QCamera3Stream *pme = (QCamera3Stream *)user_data;
+    mm_camera_super_buf_t *frame = (mm_camera_super_buf_t *)data;
+    if (NULL != pme) {
+        if (UNLIKELY(pme->mBatchSize)) {
+            /* For batch mode, the batch buffer is added to empty list */
+            if(!pme->mFreeBatchBufQ.enqueue((void*) frame->bufs[0])) {
+                ALOGE("%s: batchBuf.buf_idx: %d enqueue failed", __func__,
+                        frame->bufs[0]->buf_idx);
+            }
+        } else {
+            pme->bufDone(frame->bufs[0]->buf_idx);
+        }
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : getBatchBufs
+ *
+ * DESCRIPTION: allocate batch containers for the stream
+ *
+ * PARAMETERS :
+ *   @num_bufs   : number of buffers allocated
+ *   @initial_reg_flag: flag to indicate if buffer needs to be registered
+ *                      at kernel initially
+ *   @bufs       : output of allocated buffers
+ *   @ops_tbl    : ptr to buf mapping/unmapping ops
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::getBatchBufs(
+        uint8_t *num_bufs, uint8_t **initial_reg_flag,
+        mm_camera_buf_def_t **bufs,
+        mm_camera_map_unmap_ops_tbl_t *ops_tbl)
+{
+    int rc = NO_ERROR;
+    uint8_t *regFlags;
+
+    if (!ops_tbl || !num_bufs || !initial_reg_flag || !bufs) {
+        ALOGE("%s: input args NULL", __func__);
+        return INVALID_OPERATION;
+    }
+    CDBG_HIGH("%s : Batch container allocation stream type = %d",
+            __func__, getMyType());
+
+    Mutex::Autolock lock(mLock);
+
+    mMemOps = ops_tbl;
+
+    //Allocate batch containers
+    mStreamBatchBufs = new QCamera3HeapMemory(1);
+    if (!mStreamBatchBufs) {
+        ALOGE("%s: unable to create batch container memory", __func__);
+        return NO_MEMORY;
+    }
+    // Allocating single buffer file-descriptor for all batch containers,
+    // mStreamBatchBufs considers all the container bufs as a single buffer. But
+    // QCamera3Stream manages that single buffer as multiple batch buffers
+    CDBG("%s: Allocating batch container memory. numBatch: %d size: %d",
+            __func__, mNumBatchBufs, mStreamInfo->user_buf_info.size);
+    rc = mStreamBatchBufs->allocate(
+            mNumBatchBufs * mStreamInfo->user_buf_info.size);
+    if (rc < 0) {
+        ALOGE("%s: unable to allocate batch container memory", __func__);
+        rc = NO_MEMORY;
+        goto err1;
+    }
+
+    /* map batch buffers. getCnt here returns 1 because of single FD across
+     * batch bufs */
+    for (uint32_t i = 0; i < mStreamBatchBufs->getCnt(); i++) {
+        if (mNumBatchBufs) {
+            //For USER_BUF, size = number_of_container bufs instead of the total
+            //buf size
+            rc = ops_tbl->map_ops(i, -1, mStreamBatchBufs->getFd(i),
+                    (size_t)mNumBatchBufs, CAM_MAPPING_BUF_TYPE_STREAM_USER_BUF,
+                    ops_tbl->userdata);
+            if (rc < 0) {
+                ALOGE("%s: Failed to map stream container buffer: %d",
+                        __func__, rc);
+                //Unmap all the buffers that were successfully mapped before
+                //this buffer mapping failed
+                for (size_t j = 0; j < i; j++) {
+                    ops_tbl->unmap_ops(j, -1,
+                            CAM_MAPPING_BUF_TYPE_STREAM_USER_BUF,
+                            ops_tbl->userdata);
+                }
+                goto err2;
+            }
+        } else {
+            ALOGE("%s: invalid number of batch buffers", __func__);
+            rc = INVALID_OPERATION;
+            goto err2;
+        }
+    }
+
+    CDBG ("%s: batch bufs successfully mmapped = %d",
+            __func__, mNumBatchBufs);
+
+    /* regFlags array is allocated here, but consumed and freed by
+     * mm-camera-interface */
+    regFlags = (uint8_t *)malloc(sizeof(uint8_t) * mNumBatchBufs);
+    if (!regFlags) {
+        ALOGE("%s:%d Out of memory", __func__, __LINE__);
+        rc = NO_MEMORY;
+        goto err3;
+    }
+    memset(regFlags, 0, sizeof(uint8_t) * mNumBatchBufs);
+    /* Do not queue the container buffers as the image buffers are not yet
+     * queued. mStreamBatchBufs->getRegFlags is not called as mStreamBatchBufs
+     * considers single buffer is allocated */
+    for (uint32_t i = 0; i < mNumBatchBufs; i++) {
+        regFlags[i] = 0;
+    }
+
+    mBatchBufDefs = (mm_camera_buf_def_t *)
+            malloc(mNumBatchBufs * sizeof(mm_camera_buf_def_t));
+    if (mBatchBufDefs == NULL) {
+        ALOGE("%s:%d mBatchBufDefs memory allocation failed",
+                __func__, __LINE__);
+        rc = INVALID_OPERATION;
+        goto err4;
+    }
+    memset(mBatchBufDefs, 0, mNumBatchBufs * sizeof(mm_camera_buf_def_t));
+
+    //Populate bufDef and queue to free batchBufQ
+    for (uint32_t i = 0; i < mNumBatchBufs; i++) {
+        getBatchBufDef(mBatchBufDefs[i], i);
+        if(mFreeBatchBufQ.enqueue((void*) &mBatchBufDefs[i])) {
+            CDBG("%s: mBatchBufDefs[%d]: 0x%p", __func__, i, &mBatchBufDefs[i]);
+        } else {
+            ALOGE("%s: enqueue mBatchBufDefs[%d] failed", __func__, i);
+        }
+    }
+
+    *num_bufs = mNumBatchBufs;
+    *initial_reg_flag = regFlags;
+    *bufs = mBatchBufDefs;
+    CDBG_HIGH("%s: stream type: %d, numBufs(batch): %d",
+            __func__, mStreamInfo->stream_type, mNumBatchBufs);
+
+    return NO_ERROR;
+err4:
+    free(regFlags);
+err3:
+    for (size_t i = 0; i < mStreamBatchBufs->getCnt(); i++) {
+        ops_tbl->unmap_ops(i, -1, CAM_MAPPING_BUF_TYPE_STREAM_USER_BUF,
+                ops_tbl->userdata);
+    }
+err2:
+    mStreamBatchBufs->deallocate();
+err1:
+    delete mStreamBatchBufs;
+    mStreamBatchBufs = NULL;
+    return rc;
+}
+
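+// Batch container memory layout: mStreamBatchBufs is one heap allocation of
+// mNumBatchBufs * user_buf_info.size bytes backed by a single fd, so a single
+// map_ops() call covers all containers even though QCamera3Stream treats the
+// region as mNumBatchBufs separate containers. getBatchBufDef() below slices
+// it by index:
+//
+//     buffer = (uint8_t *)getPtr(0) + index * user_buf_info.size;
+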
+/*===========================================================================
+ * FUNCTION   : putBatchBufs
+ *
+ * DESCRIPTION: deallocate stream batch buffers
+ *
+ * PARAMETERS :
+ *   @ops_tbl    : ptr to buf mapping/unmapping ops
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::putBatchBufs(mm_camera_map_unmap_ops_tbl_t *ops_tbl)
+{
+    int rc = NO_ERROR;
+    Mutex::Autolock lock(mLock);
+
+    if (mStreamBatchBufs) {
+        for (uint32_t i = 0; i < mStreamBatchBufs->getCnt(); i++) {
+            rc = ops_tbl->unmap_ops(i, -1, CAM_MAPPING_BUF_TYPE_STREAM_USER_BUF,
+                    ops_tbl->userdata);
+            if (rc < 0) {
+                ALOGE("%s: un-map batch buf failed: %d", __func__, rc);
+            }
+        }
+        mStreamBatchBufs->deallocate();
+        delete mStreamBatchBufs;
+        mStreamBatchBufs = NULL;
+    }
+    // mm-camera-interface frees bufDefs even though bufDefs are allocated by
+    // QCamera3Stream. Don't free here
+    mBatchBufDefs = NULL;
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : getBatchBufDef
+ *
+ * DESCRIPTION: query detailed buffer information of batch buffer
+ *
+ * PARAMETERS :
+ *   @batchBufDef : [output] reference to struct to store buffer definition
+ *   @index       : [input] index of the buffer
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::getBatchBufDef(mm_camera_buf_def_t& batchBufDef,
+        int32_t index)
+{
+    int rc = NO_ERROR;
+    memset(&batchBufDef, 0, sizeof(mm_camera_buf_def_t));
+    if (mStreamBatchBufs) {
+        //Single file descriptor for all batch buffers
+        batchBufDef.fd          = mStreamBatchBufs->getFd(0);
+        batchBufDef.buf_type    = CAM_STREAM_BUF_TYPE_USERPTR;
+        batchBufDef.frame_len   = mStreamInfo->user_buf_info.size;
+        batchBufDef.mem_info    = mStreamBatchBufs;
+        batchBufDef.buffer      = (uint8_t *)mStreamBatchBufs->getPtr(0) +
+                                    (index * mStreamInfo->user_buf_info.size);
+        batchBufDef.buf_idx     = index;
+        batchBufDef.user_buf.num_buffers = mBatchSize;
+        batchBufDef.user_buf.bufs_used = 0;
+        batchBufDef.user_buf.plane_buf = mBufDefs;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : aggregateBufToBatch
+ *
+ * DESCRIPTION: aggregate an image buffer into the current batch container
+ *
+ * PARAMETERS :
+ *   @bufDef : image buffer to be aggregated into batch
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::aggregateBufToBatch(mm_camera_buf_def_t& bufDef)
+{
+    int32_t rc = NO_ERROR;
+
+    if (UNLIKELY(!mBatchSize)) {
+        ALOGE("%s: Batch mode is not enabled", __func__);
+        return INVALID_OPERATION;
+    }
+    if (!mCurrentBatchBufDef) {
+        mCurrentBatchBufDef = (mm_camera_buf_def_t *)mFreeBatchBufQ.dequeue();
+        if (!mCurrentBatchBufDef) {
+            ALOGE("%s: No empty batch buffer is available", __func__);
+            return NO_MEMORY;
+        }
+        CDBG("%s: batch buffer: %d dequeued from empty buffer list", __func__,
+                mCurrentBatchBufDef->buf_idx);
+    }
+    if (mBufsStaged == mCurrentBatchBufDef->user_buf.num_buffers) {
+        ALOGE("%s: batch buffer is already full", __func__);
+        return NO_MEMORY;
+    }
+
+    mCurrentBatchBufDef->user_buf.buf_idx[mBufsStaged] = bufDef.buf_idx;
+    mBufsStaged++;
+    CDBG("%s: buffer id: %d aggregated into batch buffer id: %d",
+            __func__, bufDef.buf_idx, mCurrentBatchBufDef->buf_idx);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : queueBatchBuf
+ *
+ * DESCRIPTION: queue batch container to downstream.
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::queueBatchBuf()
+{
+    int32_t rc = NO_ERROR;
+    struct msm_camera_user_buf_cont_t *cont_buf = NULL;
+
+    if (!mCurrentBatchBufDef) {
+        ALOGE("%s: No buffers were queued into batch", __func__);
+        return INVALID_OPERATION;
+    }
+    //bufs_used: number of valid buffers in the batch buffers
+    mCurrentBatchBufDef->user_buf.bufs_used = mBufsStaged;
+
+    //if mBufsStaged < num_buffers, initialize the buf_idx to -1 for rest of the
+    //buffers
+    for (size_t i = mBufsStaged; i < mCurrentBatchBufDef->user_buf.num_buffers;
+            i++) {
+        mCurrentBatchBufDef->user_buf.buf_idx[i] = -1;
+    }
+
+    rc = mCamOps->qbuf(mCamHandle, mChannelHandle, mCurrentBatchBufDef);
+    if (rc < 0) {
+        ALOGE("%s: queueing of batch buffer: %d failed with err: %d", __func__,
+                mCurrentBatchBufDef->buf_idx, rc);
+        return FAILED_TRANSACTION;
+    }
+    CDBG("%s Batch buf id: %d queued. bufs_used: %d", __func__,
+            mCurrentBatchBufDef->buf_idx,
+            mCurrentBatchBufDef->user_buf.bufs_used);
+
+    mCurrentBatchBufDef = NULL;
+    mBufsStaged = 0;
+
+    return rc;
+}
+
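+// HFR batching flow (sketch, assuming batchSize == 4): four consecutive
+// bufDone() calls stage image buffer indices into mCurrentBatchBufDef via
+// aggregateBufToBatch(); the owner then calls queueBatchBuf(), which stamps
+// bufs_used, pads the unused slots with -1 and submits the whole container
+// to the kernel with a single qbuf().
+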
+/*===========================================================================
+ * FUNCTION   : handleBatchBuffer
+ *
+ * DESCRIPTION: separate individual buffers from the batch and issue callback
+ *
+ * PARAMETERS :
+ *   @superBuf : Received superbuf containing batch buffer
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3Stream::handleBatchBuffer(mm_camera_super_buf_t *superBuf)
+{
+    int32_t rc = NO_ERROR;
+    mm_camera_super_buf_t *frame;
+    mm_camera_buf_def_t batchBuf;
+
+    if (UNLIKELY(!mBatchSize)) {
+        ALOGE("%s: Stream: %d not in batch mode, but batch buffer received",
+                __func__, getMyType());
+        return INVALID_OPERATION;
+    }
+    if (!mDataCB) {
+        ALOGE("%s: Data callback not set for batch mode", __func__);
+        return BAD_VALUE;
+    }
+    if (!superBuf->bufs[0]) {
+        ALOGE("%s: superBuf->bufs[0] is NULL!!", __func__);
+        return BAD_VALUE;
+    }
+
+    /* Copy the batch buffer locally and queue it back to the empty queue so
+     * that new requests received while callbacks are in progress can be
+     * handled */
+    batchBuf = *superBuf->bufs[0];
+    if (!mFreeBatchBufQ.enqueue((void*) superBuf->bufs[0])) {
+        ALOGE("%s: batchBuf.buf_idx: %d enqueue failed", __func__,
+                batchBuf.buf_idx);
+        free(superBuf);
+        return NO_MEMORY;
+    }
+    CDBG("%s: Received batch buffer: %d bufs_used: %d", __func__,
+            batchBuf.buf_idx, batchBuf.user_buf.bufs_used);
+    //dummy local bufDef to issue multiple callbacks
+    mm_camera_buf_def_t buf;
+    memset(&buf, 0, sizeof(mm_camera_buf_def_t));
+
+    for (size_t i = 0; i < batchBuf.user_buf.bufs_used; i++) {
+        int32_t buf_idx = batchBuf.user_buf.buf_idx[i];
+        buf = mBufDefs[buf_idx];
+
+        /* this memory is freed inside dataCB. Should not be freed here */
+        frame = (mm_camera_super_buf_t *)malloc(sizeof(mm_camera_super_buf_t));
+        if (!frame) {
+            ALOGE("%s:%d malloc failed. Buffers will be dropped",
+                    __func__, __LINE__);
+            break;
+        } else {
+            memcpy(frame, superBuf, sizeof(mm_camera_super_buf_t));
+            frame->bufs[0] = &buf;
+
+            mDataCB(frame, this, mUserData);
+        }
+    }
+    CDBG("%s: batch buffer: %d callbacks done", __func__,
+            batchBuf.buf_idx);
+
+    free(superBuf);
+    return rc;
+}
+
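+// On the return path a single batch container fans out into bufs_used
+// individual callbacks: the container is recycled to mFreeBatchBufQ right
+// away, and a stack-local mm_camera_buf_def_t copy is substituted into each
+// per-frame super buffer before invoking mDataCB, so the callback never sees
+// the container itself.
+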
+/*===========================================================================
+ * FUNCTION   : flushFreeBatchBufQ
+ *
+ * DESCRIPTION: dequeue all the entries of mFreeBatchBufQ and call flush.
+ *              QCameraQueue::flush calls 'free(node->data)' which should be
+ *              avoided for mFreeBatchBufQ as the entries are not allocated
+ *              during each enqueue
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCamera3Stream::flushFreeBatchBufQ()
+{
+    while (!mFreeBatchBufQ.isEmpty()) {
+        mFreeBatchBufQ.dequeue();
+    }
+    mFreeBatchBufQ.flush();
+}
+
+}; // namespace qcamera
diff --git a/camera/QCamera2/HAL3/QCamera3Stream.h b/camera/QCamera2/HAL3/QCamera3Stream.h
new file mode 100644
index 0000000..b3abbed
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3Stream.h
@@ -0,0 +1,164 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCAMERA3_STREAM_H__
+#define __QCAMERA3_STREAM_H__
+
+#include <hardware/camera3.h>
+#include "utils/Mutex.h"
+#include "QCameraCmdThread.h"
+#include "QCamera3Mem.h"
+#include "QCamera3StreamMem.h"
+
+extern "C" {
+#include <mm_camera_interface.h>
+}
+
+namespace qcamera {
+
+class QCamera3Stream;
+class QCamera3Channel;
+
+typedef void (*hal3_stream_cb_routine)(mm_camera_super_buf_t *frame,
+                                  QCamera3Stream *stream,
+                                  void *userdata);
+
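+// Callback contract (as implemented in QCamera3Stream.cpp): the frame passed
+// to a hal3_stream_cb_routine is heap-allocated per invocation and is expected
+// to be released by the callee, while stream/userdata identify the originating
+// QCamera3Stream and the value registered through init().
+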
+class QCamera3Stream
+{
+public:
+    QCamera3Stream(uint32_t camHandle,
+                  uint32_t chId,
+                  mm_camera_ops_t *camOps,
+                  cam_padding_info_t *paddingInfo,
+                  QCamera3Channel *channel);
+    virtual ~QCamera3Stream();
+    virtual int32_t init(cam_stream_type_t streamType,
+                         cam_format_t streamFormat,
+                         cam_dimension_t streamDim,
+                         cam_rotation_t streamRotation,
+                         cam_stream_reproc_config_t* reprocess_config,
+                         uint8_t minStreamBufNum,
+                         uint32_t postprocess_mask,
+                         cam_is_type_t is_type,
+                         uint32_t batchSize,
+                         hal3_stream_cb_routine stream_cb,
+                         void *userdata);
+    virtual int32_t bufDone(uint32_t index);
+    virtual int32_t bufRelease(int32_t index);
+    virtual int32_t processDataNotify(mm_camera_super_buf_t *bufs);
+    virtual int32_t start();
+    virtual int32_t stop();
+    virtual int32_t queueBatchBuf();
+
+    static void dataNotifyCB(mm_camera_super_buf_t *recvd_frame, void *userdata);
+    static void *dataProcRoutine(void *data);
+    uint32_t getMyHandle() const {return mHandle;}
+    cam_stream_type_t getMyType() const;
+    int32_t getFrameOffset(cam_frame_len_offset_t &offset);
+    int32_t getFrameDimension(cam_dimension_t &dim);
+    int32_t getFormat(cam_format_t &fmt);
+    QCamera3StreamMem *getStreamBufs() {return mStreamBufs;};
+    uint32_t getMyServerID();
+
+    int32_t mapBuf(uint8_t buf_type, uint32_t buf_idx,
+            int32_t plane_idx, int fd, size_t size);
+    int32_t unmapBuf(uint8_t buf_type, uint32_t buf_idx, int32_t plane_idx);
+    int32_t setParameter(cam_stream_parm_buffer_t &param);
+
+    static void releaseFrameData(void *data, void *user_data);
+
+private:
+    uint32_t mCamHandle;
+    uint32_t mChannelHandle;
+    uint32_t mHandle; // stream handle from mm-camera-interface
+    mm_camera_ops_t *mCamOps;
+    cam_stream_info_t *mStreamInfo; // ptr to stream info buf
+    mm_camera_stream_mem_vtbl_t mMemVtbl;
+    mm_camera_map_unmap_ops_tbl_t *mMemOps;
+    uint8_t mNumBufs;
+    hal3_stream_cb_routine mDataCB;
+    void *mUserData;
+
+    QCameraQueue     mDataQ;
+    QCameraCmdThread mProcTh; // thread for dataCB
+
+    QCamera3HeapMemory *mStreamInfoBuf;
+    QCamera3StreamMem *mStreamBufs;
+    mm_camera_buf_def_t *mBufDefs;
+    cam_frame_len_offset_t mFrameLenOffset;
+    cam_padding_info_t mPaddingInfo;
+    QCamera3Channel *mChannel;
+    Mutex mLock;    //Lock controlling access to 'mBufDefs'
+
+    uint32_t mBatchSize; // 0: No batch, non-0: Number of image bufs in a batch
+    uint8_t mNumBatchBufs; //Number of batch buffers which can hold image bufs
+    QCamera3HeapMemory *mStreamBatchBufs; //Pointer to batch buffers memory
+    mm_camera_buf_def_t *mBatchBufDefs; //Pointer to array of batch bufDefs
+    mm_camera_buf_def_t *mCurrentBatchBufDef; //batch buffer in progress during
+                                              //aggregation
+    uint32_t    mBufsStaged; //Number of image buffers aggregated into
+                             //currentBatchBufDef
+    QCameraQueue mFreeBatchBufQ; //Buffer queue containing empty batch buffers
+
+    static int32_t get_bufs(
+                     cam_frame_len_offset_t *offset,
+                     uint8_t *num_bufs,
+                     uint8_t **initial_reg_flag,
+                     mm_camera_buf_def_t **bufs,
+                     mm_camera_map_unmap_ops_tbl_t *ops_tbl,
+                     void *user_data);
+    static int32_t put_bufs(
+                     mm_camera_map_unmap_ops_tbl_t *ops_tbl,
+                     void *user_data);
+    static int32_t invalidate_buf(uint32_t index, void *user_data);
+    static int32_t clean_invalidate_buf(uint32_t index, void *user_data);
+
+    int32_t getBufs(cam_frame_len_offset_t *offset,
+                     uint8_t *num_bufs,
+                     uint8_t **initial_reg_flag,
+                     mm_camera_buf_def_t **bufs,
+                     mm_camera_map_unmap_ops_tbl_t *ops_tbl);
+    int32_t putBufs(mm_camera_map_unmap_ops_tbl_t *ops_tbl);
+    int32_t invalidateBuf(uint32_t index);
+    int32_t cleanInvalidateBuf(uint32_t index);
+    int32_t getBatchBufs(
+            uint8_t *num_bufs, uint8_t **initial_reg_flag,
+            mm_camera_buf_def_t **bufs,
+            mm_camera_map_unmap_ops_tbl_t *ops_tbl);
+    int32_t putBatchBufs(mm_camera_map_unmap_ops_tbl_t *ops_tbl);
+    int32_t getBatchBufDef(mm_camera_buf_def_t& batchBufDef,
+            int32_t index);
+    int32_t aggregateBufToBatch(mm_camera_buf_def_t& bufDef);
+    int32_t handleBatchBuffer(mm_camera_super_buf_t *superBuf);
+    void flushFreeBatchBufQ();
+};
+
+}; // namespace qcamera
+
+#endif /* __QCAMERA3_STREAM_H__ */
diff --git a/camera/QCamera2/HAL3/QCamera3StreamMem.cpp b/camera/QCamera2/HAL3/QCamera3StreamMem.cpp
new file mode 100644
index 0000000..4c8fc52
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3StreamMem.cpp
@@ -0,0 +1,485 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define LOG_TAG "QCamera3StreamMem"
+
+#include <string.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <utils/Log.h>
+#include <utils/Errors.h>
+#include <gralloc_priv.h>
+#include <qdMetaData.h>
+#include "QCamera3Mem.h"
+#include "QCamera3HWI.h"
+
+extern "C" {
+#include <mm_camera_interface.h>
+}
+
+using namespace android;
+
+namespace qcamera {
+
+/*===========================================================================
+ * FUNCTION   : QCamera3StreamMem
+ *
+ * DESCRIPTION: constructor of QCamera3StreamMem
+ *
+ * PARAMETERS :
+ *   @maxHeapBuffer    : maximum number of heap buffers
+ *   @queueHeapBuffers : if true, heap buffers are flagged to be queued to the
+ *                       backend initially (see getRegFlags)
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCamera3StreamMem::QCamera3StreamMem(uint32_t maxHeapBuffer, bool queueHeapBuffers) :
+        mHeapMem(maxHeapBuffer),
+        mGrallocMem(maxHeapBuffer),
+        mMaxHeapBuffers(maxHeapBuffer),
+        mQueueHeapBuffers(queueHeapBuffers)
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCamera3StreamMem
+ *
+ * DESCRIPTION: destructor of QCamera3StreamMem
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCamera3StreamMem::~QCamera3StreamMem()
+{
+    clear();
+}
+
+/*===========================================================================
+ * FUNCTION   : getCnt
+ *
+ * DESCRIPTION: query number of buffers allocated/registered
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : number of buffers allocated
+ *==========================================================================*/
+uint32_t QCamera3StreamMem::getCnt()
+{
+    Mutex::Autolock lock(mLock);
+
+    return (mHeapMem.getCnt() + mGrallocMem.getCnt());
+}
+
+/*===========================================================================
+ * FUNCTION   : getRegFlags
+ *
+ * DESCRIPTION: query initial reg flags
+ *
+ * PARAMETERS :
+ *   @regFlags: initial reg flags of the allocated/registered buffers
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3StreamMem::getRegFlags(uint8_t * regFlags)
+{
+    // Assume that all buffers allocated can be queued.
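+    // Only the heap-backed buffers are covered here; whether they get queued
+    // at start-up is controlled by the mQueueHeapBuffers flag.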
+    for (uint32_t i = 0; i < mHeapMem.getCnt(); i ++)
+        regFlags[i] = (mQueueHeapBuffers ? 1 : 0);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : getFd
+ *
+ * DESCRIPTION: return file descriptor of the indexed buffer
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : file descriptor
+ *==========================================================================*/
+int QCamera3StreamMem::getFd(uint32_t index)
+{
+    Mutex::Autolock lock(mLock);
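+    // Buffer indices form one continuous range: [0, mMaxHeapBuffers) addresses
+    // the internal heap pool, while higher indices address registered gralloc
+    // buffers. The per-index accessors below all use the same split.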
+
+    if (index < mMaxHeapBuffers)
+        return mHeapMem.getFd(index);
+    else
+        return mGrallocMem.getFd(index);
+}
+
+/*===========================================================================
+ * FUNCTION   : getSize
+ *
+ * DESCRIPTION: return buffer size of the indexed buffer
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : buffer size
+ *==========================================================================*/
+ssize_t QCamera3StreamMem::getSize(uint32_t index)
+{
+    Mutex::Autolock lock(mLock);
+
+    if (index < mMaxHeapBuffers)
+        return mHeapMem.getSize(index);
+    else
+        return mGrallocMem.getSize(index);
+}
+
+/*===========================================================================
+ * FUNCTION   : invalidateCache
+ *
+ * DESCRIPTION: invalidate the cache of the indexed buffer
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3StreamMem::invalidateCache(uint32_t index)
+{
+    Mutex::Autolock lock(mLock);
+
+    if (index < mMaxHeapBuffers)
+        return mHeapMem.invalidateCache(index);
+    else
+        return mGrallocMem.invalidateCache(index);
+}
+
+/*===========================================================================
+ * FUNCTION   : cleanInvalidateCache
+ *
+ * DESCRIPTION: clean and invalidate the cache of the indexed buffer
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3StreamMem::cleanInvalidateCache(uint32_t index)
+{
+    Mutex::Autolock lock(mLock);
+
+    if (index < mMaxHeapBuffers)
+        return mHeapMem.cleanInvalidateCache(index);
+    else
+        return mGrallocMem.cleanInvalidateCache(index);
+}
+
+/*===========================================================================
+ * FUNCTION   : getBufDef
+ *
+ * DESCRIPTION: query detailed buffer information
+ *
+ * PARAMETERS :
+ *   @offset  : [input] frame buffer offset
+ *   @bufDef  : [output] reference to struct to store buffer definition
+ *   @index   : [input] index of the buffer
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3StreamMem::getBufDef(const cam_frame_len_offset_t &offset,
+        mm_camera_buf_def_t &bufDef, uint32_t index)
+{
+    int32_t ret = NO_ERROR;
+
+    if (index < mMaxHeapBuffers)
+        ret = mHeapMem.getBufDef(offset, bufDef, index);
+    else
+        ret = mGrallocMem.getBufDef(offset, bufDef, index);
+
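+    // Override mem_info so the returned buffer definition refers back to this
+    // wrapper object instead of the underlying mHeapMem/mGrallocMem instance.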
+    bufDef.mem_info = (void *)this;
+
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : getPtr
+ *
+ * DESCRIPTION: return virtual address of the indexed buffer
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : virtual address
+ *==========================================================================*/
+void* QCamera3StreamMem::getPtr(uint32_t index)
+{
+    Mutex::Autolock lock(mLock);
+
+    if (index < mMaxHeapBuffers)
+        return mHeapMem.getPtr(index);
+    else
+        return mGrallocMem.getPtr(index);
+}
+
+/*===========================================================================
+ * FUNCTION   : valid
+ *
+ * DESCRIPTION: return whether there is a valid buffer at the current index
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : true if there is a buffer, false otherwise
+ *==========================================================================*/
+bool QCamera3StreamMem::valid(uint32_t index)
+{
+    Mutex::Autolock lock(mLock);
+
+    if (index < mMaxHeapBuffers)
+        return (mHeapMem.getSize(index) > 0);
+    else
+        return (mGrallocMem.getSize(index) > 0);
+}
+
+/*===========================================================================
+ * FUNCTION   : registerBuffer
+ *
+ * DESCRIPTION: registers frameworks-allocated gralloc buffer_handle_t
+ *
+ * PARAMETERS :
+ *   @buffer  : buffer_handle_t pointer
+ *   @type :    cam_stream_type_t
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3StreamMem::registerBuffer(buffer_handle_t *buffer,
+        cam_stream_type_t type)
+{
+    Mutex::Autolock lock(mLock);
+    return mGrallocMem.registerBuffer(buffer, type);
+}
+
+
+/*===========================================================================
+ * FUNCTION   : unregisterBuffer
+ *
+ * DESCRIPTION: unregister buffer
+ *
+ * PARAMETERS :
+ *   @idx     : unregister buffer at index 'idx'
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3StreamMem::unregisterBuffer(size_t idx)
+{
+    Mutex::Autolock lock(mLock);
+    return mGrallocMem.unregisterBuffer(idx);
+}
+
+/*===========================================================================
+ * FUNCTION   : getMatchBufIndex
+ *
+ * DESCRIPTION: query buffer index by object ptr
+ *
+ * PARAMETERS :
+ *   @opaque  : opaque ptr
+ *
+ * RETURN     : buffer index if match found,
+ *              -1 if failed
+ *==========================================================================*/
+int QCamera3StreamMem::getMatchBufIndex(void *object)
+{
+    Mutex::Autolock lock(mLock);
+    return mGrallocMem.getMatchBufIndex(object);
+}
+
+/*===========================================================================
+ * FUNCTION   : getBufferHandle
+ *
+ * DESCRIPTION: return framework pointer
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : buffer ptr if match found
+ *              NULL if failed
+ *==========================================================================*/
+void *QCamera3StreamMem::getBufferHandle(uint32_t index)
+{
+    Mutex::Autolock lock(mLock);
+    return mGrallocMem.getBufferHandle(index);
+}
+
+/*===========================================================================
+ * FUNCTION   : unregisterBuffers
+ *
+ * DESCRIPTION: unregister buffers
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera3StreamMem::unregisterBuffers()
+{
+    Mutex::Autolock lock(mLock);
+    mGrallocMem.unregisterBuffers();
+}
+
+
+/*===========================================================================
+ * FUNCTION   : allocateAll / allocateOne
+ *
+ * DESCRIPTION: allocate heap buffer(s) of the requested size (allocateAll
+ *              allocates the full set, allocateOne adds a single buffer)
+ *
+ * PARAMETERS :
+ *   @size    : length of the buffer to be allocated
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera3StreamMem::allocateAll(size_t size)
+{
+    Mutex::Autolock lock(mLock);
+    return mHeapMem.allocate(size);
+}
+
+int QCamera3StreamMem::allocateOne(size_t size)
+{
+    Mutex::Autolock lock(mLock);
+    return mHeapMem.allocateOne(size);
+}
+
+/*===========================================================================
+ * FUNCTION   : deallocate
+ *
+ * DESCRIPTION: deallocate heap buffers
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void QCamera3StreamMem::deallocate()
+{
+    Mutex::Autolock lock(mLock);
+    mHeapMem.deallocate();
+}
+
+/*===========================================================================
+ * FUNCTION   : markFrameNumber
+ *
+ * DESCRIPTION: We use this function from the request call path to mark the
+ *              buffers with the frame number they are intended for. This info
+ *              is used later when giving out the callback, and it is the duty
+ *              of PP to ensure that data for that particular frameNumber/Request
+ *              is written to this buffer.
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *   @frame#  : Frame number from the framework
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCamera3StreamMem::markFrameNumber(uint32_t index, uint32_t frameNumber)
+{
+    Mutex::Autolock lock(mLock);
+    if (index < mMaxHeapBuffers)
+        return mHeapMem.markFrameNumber(index, frameNumber);
+    else
+        return mGrallocMem.markFrameNumber(index, frameNumber);
+}
+
+/*===========================================================================
+ * FUNCTION   : getFrameNumber
+ *
+ * DESCRIPTION: We use this to fetch the frameNumber for the request with which
+ *              this buffer was given to HAL
+ *
+ *
+ * PARAMETERS :
+ *   @index   : index of the buffer
+ *
+ * RETURN     : int32_t frameNumber
+ *              positive/zero  -- success
+ *              negative failure
+ *==========================================================================*/
+int32_t QCamera3StreamMem::getFrameNumber(uint32_t index)
+{
+    Mutex::Autolock lock(mLock);
+    if (index < mMaxHeapBuffers)
+        return mHeapMem.getFrameNumber(index);
+    else
+        return mGrallocMem.getFrameNumber(index);
+}
+
+/*===========================================================================
+ * FUNCTION   : getGrallocBufferIndex
+ *
+ * DESCRIPTION: We use this to fetch the gralloc buffer index based on frameNumber
+ *
+ * PARAMETERS :
+ *   @frameNumber : frame Number
+ *
+ * RETURN     : int32_t buffer index
+ *              positive/zero  -- success
+ *              negative failure
+ *==========================================================================*/
+int32_t QCamera3StreamMem::getGrallocBufferIndex(uint32_t frameNumber)
+{
+    Mutex::Autolock lock(mLock);
+    int32_t index = mGrallocMem.getBufferIndex(frameNumber);
+    return index;
+}
+
+/*===========================================================================
+ * FUNCTION   : getHeapBufferIndex
+ *
+ * DESCRIPTION: We use this to fetch the heap buffer index based on frameNumber
+ *
+ * PARAMETERS :
+ *   @frameNumber : frame Number
+ *
+ * RETURN     : int32_t buffer index
+ *              positive/zero  -- success
+ *              negative failure
+ *==========================================================================*/
+int32_t QCamera3StreamMem::getHeapBufferIndex(uint32_t frameNumber)
+{
+    Mutex::Autolock lock(mLock);
+    int32_t index = mHeapMem.getBufferIndex(frameNumber);
+    return index;
+}
+
+}; //namespace qcamera
diff --git a/camera/QCamera2/HAL3/QCamera3StreamMem.h b/camera/QCamera2/HAL3/QCamera3StreamMem.h
new file mode 100644
index 0000000..03214f6
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3StreamMem.h
@@ -0,0 +1,97 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCAMERA3_STREAMMEM_H__
+#define __QCAMERA3_STREAMMEM_H__
+
+#include <hardware/camera3.h>
+#include <utils/Mutex.h>
+#include "QCamera3Mem.h"
+
+extern "C" {
+#include <sys/types.h>
+#include <linux/msm_ion.h>
+#include <mm_camera_interface.h>
+}
+
+using namespace android;
+
+namespace qcamera {
+
+class QCamera3StreamMem {
+public:
+    QCamera3StreamMem(uint32_t maxHeapBuffer, bool queueAll = true);
+    virtual ~QCamera3StreamMem();
+
+    uint32_t getCnt();
+    int getRegFlags(uint8_t *regFlags);
+
+    // Helper function to access individual QCamera3Buffer object
+    int getFd(uint32_t index);
+    ssize_t getSize(uint32_t index);
+    int invalidateCache(uint32_t index);
+    int cleanInvalidateCache(uint32_t index);
+    int32_t getBufDef(const cam_frame_len_offset_t &offset,
+            mm_camera_buf_def_t &bufDef, uint32_t index);
+    void *getPtr(uint32_t index);
+
+    bool valid(uint32_t index);
+
+    // Gralloc buffer related functions
+    int registerBuffer(buffer_handle_t *buffer, cam_stream_type_t type);
+    int32_t unregisterBuffer(size_t idx);
+    int getMatchBufIndex(void *object);
+    void *getBufferHandle(uint32_t index);
+    void unregisterBuffers(); //TODO: replace with unified clear() function?
+
+    // Heap buffer related functions
+    int allocateAll(size_t size);
+    int allocateOne(size_t size);
+    void deallocate(); //TODO: replace with unified clear() function?
+
+    // Clear function: unregister for gralloc buffer, and deallocate for heap buffer
+    void clear() {unregisterBuffers(); deallocate(); }
+
+    // Frame number getter and setter
+    int32_t markFrameNumber(uint32_t index, uint32_t frameNumber);
+    int32_t getFrameNumber(uint32_t index);
+    int32_t getGrallocBufferIndex(uint32_t frameNumber);
+    int32_t getHeapBufferIndex(uint32_t frameNumber);
+
+private:
+    //variables
+    QCamera3HeapMemory mHeapMem;
+    QCamera3GrallocMemory mGrallocMem;
+    uint32_t mMaxHeapBuffers;
+    Mutex mLock;
+    bool mQueueHeapBuffers;
+};
+
+}; // namespace qcamera
+#endif // __QCAMERA3_STREAMMEM_H__
diff --git a/camera/QCamera2/HAL3/QCamera3VendorTags.cpp b/camera/QCamera2/HAL3/QCamera3VendorTags.cpp
new file mode 100644
index 0000000..91a7fcd
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3VendorTags.cpp
@@ -0,0 +1,370 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#define LOG_TAG "QCamera3VendorTags"
+//#define LOG_NDEBUG 0
+
+#include <hardware/camera3.h>
+#include <utils/Log.h>
+#include <utils/Errors.h>
+#include "QCamera3HWI.h"
+#include "QCamera3VendorTags.h"
+
+using namespace android;
+
+namespace qcamera {
+
+const int QCAMERA3_SECTION_COUNT = QCAMERA3_SECTIONS_END - VENDOR_SECTION;
+
+enum qcamera3_ext_tags qcamera3_ext3_section_bounds[QCAMERA3_SECTIONS_END -
+    VENDOR_SECTION] = {
+        QCAMERA3_PRIVATEDATA_END,
+        QCAMERA3_CDS_END,
+        QCAMERA3_OPAQUE_RAW_END,
+        QCAMERA3_CROP_END,
+        QCAMERA3_TUNING_META_DATA_END,
+        QCAMERA3_AV_TIMER_END,
+        QCAMERA3_SENSOR_META_DATA_END,
+        QCAMERA3_TEMPORAL_DENOISE_END,
+        NEXUS_EXPERIMENTAL_2015_END,
+};
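+// Each entry above is the first tag value past the end of its section (the
+// *_END enumerators); get_tag_name()/get_tag_type() treat it as an exclusive
+// upper bound when validating a tag.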
+
+typedef struct vendor_tag_info {
+    const char *tag_name;
+    uint8_t     tag_type;
+} vendor_tag_info_t;
+
+const char *qcamera3_ext_section_names[QCAMERA3_SECTIONS_END -
+        VENDOR_SECTION] = {
+    "org.codeaurora.qcamera3.privatedata",
+    "org.codeaurora.qcamera3.CDS",
+    "org.codeaurora.qcamera3.opaque_raw",
+    "org.codeaurora.qcamera3.crop",
+    "org.codeaurora.qcamera3.tuning_meta_data",
+    "org.codeaurora.qcamera3.av_timer",
+    "org.codeaurora.qcamera3.sensor_meta_data",
+    "org.codeaurora.qcamera3.temporal_denoise",
+    "com.google.nexus.experimental2015"
+};
+
+vendor_tag_info_t qcamera3_privatedata[QCAMERA3_PRIVATEDATA_END - QCAMERA3_PRIVATEDATA_START] = {
+    { "privatedata_reprocess", TYPE_INT32 }
+};
+
+vendor_tag_info_t qcamera3_cds[QCAMERA3_CDS_END - QCAMERA3_CDS_START] = {
+    { "cds_mode", TYPE_INT32 },
+    { "cds_info", TYPE_BYTE }
+};
+
+vendor_tag_info_t qcamera3_opaque_raw[QCAMERA3_OPAQUE_RAW_END -
+        QCAMERA3_OPAQUE_RAW_START] = {
+    { "opaque_raw_strides", TYPE_INT32 },
+    { "opaque_raw_format", TYPE_BYTE }
+};
+
+vendor_tag_info_t qcamera3_crop[QCAMERA3_CROP_END- QCAMERA3_CROP_START] = {
+    { "count", TYPE_INT32 },
+    { "data", TYPE_INT32},
+    { "roimap", TYPE_INT32 }
+};
+
+vendor_tag_info_t qcamera3_tuning_meta_data[QCAMERA3_TUNING_META_DATA_END -
+        QCAMERA3_TUNING_META_DATA_START] = {
+    { "tuning_meta_data_blob", TYPE_INT32 }
+};
+
+vendor_tag_info qcamera3_av_timer[QCAMERA3_AV_TIMER_END -
+                                  QCAMERA3_AV_TIMER_START] = {
+   {"use_av_timer", TYPE_BYTE }
+};
+
+vendor_tag_info qcamera3_sensor_meta_data[QCAMERA3_SENSOR_META_DATA_END -
+                                  QCAMERA3_SENSOR_META_DATA_START] = {
+   {"dynamic_black_level_pattern", TYPE_FLOAT }
+};
+
+vendor_tag_info_t qcamera3_temporal_denoise[QCAMERA3_TEMPORAL_DENOISE_END -
+        QCAMERA3_TEMPORAL_DENOISE_START] = {
+    { "enable", TYPE_BYTE },
+    { "process_type", TYPE_INT32 }
+};
+
+vendor_tag_info_t nexus_experimental_2015[NEXUS_EXPERIMENTAL_2015_END -
+        NEXUS_EXPERIMENTAL_2015_START] = {
+    {"sensor.dynamicBlackLevel", TYPE_FLOAT },
+    {"sensor.info.opticallyShieldedRegions", TYPE_INT32 }
+};
+
+vendor_tag_info_t *qcamera3_tag_info[QCAMERA3_SECTIONS_END -
+        VENDOR_SECTION] = {
+    qcamera3_privatedata,
+    qcamera3_cds,
+    qcamera3_opaque_raw,
+    qcamera3_crop,
+    qcamera3_tuning_meta_data,
+    qcamera3_av_timer,
+    qcamera3_sensor_meta_data,
+    qcamera3_temporal_denoise,
+    nexus_experimental_2015,
+};
+
+uint32_t qcamera3_all_tags[] = {
+    // QCAMERA3_PRIVATEDATA
+    (uint32_t)QCAMERA3_PRIVATEDATA_REPROCESS,
+
+    // QCAMERA3_CDS
+    (uint32_t)QCAMERA3_CDS_MODE,
+    (uint32_t)QCAMERA3_CDS_INFO,
+
+    // QCAMERA3_OPAQUE_RAW
+    (uint32_t)QCAMERA3_OPAQUE_RAW_STRIDES,
+    (uint32_t)QCAMERA3_OPAQUE_RAW_FORMAT,
+
+    // QCAMERA3_CROP
+    (uint32_t)QCAMERA3_CROP_COUNT_REPROCESS,
+    (uint32_t)QCAMERA3_CROP_REPROCESS,
+    (uint32_t)QCAMERA3_CROP_ROI_MAP_REPROCESS,
+
+    // QCAMERA3_TUNING_META_DATA
+    (uint32_t)QCAMERA3_TUNING_META_DATA_BLOB,
+
+    //QCAMERA3_AVTIMER
+    (uint32_t)QCAMERA3_USE_AV_TIMER,
+
+    //QCAMERA3_SENSOR_META_DATA
+    (uint32_t)QCAMERA3_SENSOR_DYNAMIC_BLACK_LEVEL_PATTERN,
+
+    // QCAMERA3_TEMPORAL_DENOISE
+    (uint32_t)QCAMERA3_TEMPORAL_DENOISE_ENABLE,
+    (uint32_t)QCAMERA3_TEMPORAL_DENOISE_PROCESS_TYPE,
+
+    //NEXUS_EXPERIMENTAL_2015
+    (uint32_t)NEXUS_EXPERIMENTAL_2015_SENSOR_DYNAMIC_BLACK_LEVEL,
+    (uint32_t)NEXUS_EXPERIMENTAL_2015_SENSOR_INFO_OPTICALLY_SHIELDED_REGIONS,
+};
+
+const vendor_tag_ops_t* QCamera3VendorTags::Ops = NULL;
+
+/*===========================================================================
+ * FUNCTION   : get_vendor_tag_ops
+ *
+ * DESCRIPTION: Get the metadata vendor tag function pointers
+ *
+ * PARAMETERS :
+ *    @ops   : function pointer table to be filled by HAL
+ *
+ *
+ * RETURN     : NONE
+ *==========================================================================*/
+void QCamera3VendorTags::get_vendor_tag_ops(
+                                vendor_tag_ops_t* ops)
+{
+    ALOGV("%s: E", __func__);
+
+    Ops = ops;
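+    // Remember the table being filled in; the static callbacks below compare
+    // their ops argument against this pointer before doing any work.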
+
+    ops->get_tag_count = get_tag_count;
+    ops->get_all_tags = get_all_tags;
+    ops->get_section_name = get_section_name;
+    ops->get_tag_name = get_tag_name;
+    ops->get_tag_type = get_tag_type;
+    ops->reserved[0] = NULL;
+
+    ALOGV("%s: X", __func__);
+    return;
+}
+
+/*===========================================================================
+ * FUNCTION   : get_tag_count
+ *
+ * DESCRIPTION: Get number of vendor tags supported
+ *
+ * PARAMETERS :
+ *    @ops   :  Vendor tag ops data structure
+ *
+ *
+ * RETURN     : Number of vendor tags supported
+ *==========================================================================*/
+
+int QCamera3VendorTags::get_tag_count(
+                const vendor_tag_ops_t * ops)
+{
+    size_t count = 0;
+    if (ops == Ops)
+        count = sizeof(qcamera3_all_tags)/sizeof(qcamera3_all_tags[0]);
+
+    ALOGV("%s: count is %d", __func__, count);
+    return (int)count;
+}
+
+/*===========================================================================
+ * FUNCTION   : get_all_tags
+ *
+ * DESCRIPTION: Fill array with all supported vendor tags
+ *
+ * PARAMETERS :
+ *    @ops      :  Vendor tag ops data structure
+ *    @tag_array:  array of metadata tags
+ *
+ * RETURN     : Success: the section name of the specific tag
+ *              Failure: NULL
+ *==========================================================================*/
+void QCamera3VendorTags::get_all_tags(
+                const vendor_tag_ops_t * ops,
+                uint32_t *g_array)
+{
+    if (ops != Ops)
+        return;
+
+    for (size_t i = 0;
+            i < sizeof(qcamera3_all_tags)/sizeof(qcamera3_all_tags[0]);
+            i++) {
+        g_array[i] = qcamera3_all_tags[i];
+        CDBG("%s: g_array[%zu] is %u", __func__, i, g_array[i]);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : get_section_name
+ *
+ * DESCRIPTION: Get section name for vendor tag
+ *
+ * PARAMETERS :
+ *    @ops   :  Vendor tag ops structure
+ *    @tag   :  Vendor specific tag
+ *
+ *
+ * RETURN     : Success: the section name of the specific tag
+ *              Failure: NULL
+ *==========================================================================*/
+
+const char* QCamera3VendorTags::get_section_name(
+                const vendor_tag_ops_t * ops,
+                uint32_t tag)
+{
+    ALOGV("%s: E", __func__);
+    if (ops != Ops)
+        return NULL;
+
+    const char *ret;
+    uint32_t section = tag >> 16;
+
+    if (section < VENDOR_SECTION || section >= QCAMERA3_SECTIONS_END)
+        ret = NULL;
+    else
+        ret = qcamera3_ext_section_names[section - VENDOR_SECTION];
+
+    if (ret)
+        ALOGV("%s: section_name[%d] is %s", __func__, tag, ret);
+    ALOGV("%s: X", __func__);
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : get_tag_name
+ *
+ * DESCRIPTION: Get name of a vendor specific tag
+ *
+ * PARAMETERS :
+ *    @tag   :  Vendor specific tag
+ *
+ *
+ * RETURN     : Success: the name of the specific tag
+ *              Failure: NULL
+ *==========================================================================*/
+const char* QCamera3VendorTags::get_tag_name(
+                const vendor_tag_ops_t * ops,
+                uint32_t tag)
+{
+    ALOGV("%s: E", __func__);
+    const char *ret;
+    uint32_t section = tag >> 16;
+    uint32_t section_index = section - VENDOR_SECTION;
+    uint32_t tag_index = tag & 0xFFFF;
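+    // Vendor tags pack the section id into the upper 16 bits and the
+    // per-section index into the lower 16 bits (see qcamera3_ext_section_ranges).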
+
+    if (ops != Ops) {
+        ret = NULL;
+        goto done;
+    }
+
+    if (section < VENDOR_SECTION || section >= QCAMERA3_SECTIONS_END)
+        ret = NULL;
+    else if (tag >= (uint32_t)qcamera3_ext3_section_bounds[section_index])
+        ret = NULL;
+    else
+        ret = qcamera3_tag_info[section_index][tag_index].tag_name;
+
+    if (ret)
+        ALOGV("%s: tag name for tag %d is %s", __func__, tag, ret);
+    ALOGV("%s: X", __func__);
+
+done:
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : get_tag_type
+ *
+ * DESCRIPTION: Get type of a vendor specific tag
+ *
+ * PARAMETERS :
+ *    @tag   :  Vendor specific tag
+ *
+ *
+ * RETURN     : Success: the type of the specific tag
+ *              Failure: -1
+ *==========================================================================*/
+int QCamera3VendorTags::get_tag_type(
+                const vendor_tag_ops_t *ops,
+                uint32_t tag)
+{
+    ALOGV("%s: E", __func__);
+    int ret;
+    uint32_t section = tag >> 16;
+    uint32_t section_index = section - VENDOR_SECTION;
+    uint32_t tag_index = tag & 0xFFFF;
+
+    if (ops != Ops) {
+        ret = -1;
+        goto done;
+    }
+    if (section < VENDOR_SECTION || section >= QCAMERA3_SECTIONS_END)
+        ret = -1;
+    else if (tag >= (uint32_t)qcamera3_ext3_section_bounds[section_index])
+        ret = -1;
+    else
+        ret = qcamera3_tag_info[section_index][tag_index].tag_type;
+
+    ALOGV("%s: tag type for tag %d is %d", __func__, tag, ret);
+    ALOGV("%s: X", __func__);
+done:
+    return ret;
+}
+
+}; //end namespace qcamera
diff --git a/camera/QCamera2/HAL3/QCamera3VendorTags.h b/camera/QCamera2/HAL3/QCamera3VendorTags.h
new file mode 100644
index 0000000..601b448
--- /dev/null
+++ b/camera/QCamera2/HAL3/QCamera3VendorTags.h
@@ -0,0 +1,172 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#ifndef __QCAMERA3VENDORTAGS_H__
+#define __QCAMERA3VENDORTAGS_H__
+
+namespace qcamera {
+
+enum qcamera3_ext_section {
+    QCAMERA3_PRIVATEDATA = VENDOR_SECTION,
+    QCAMERA3_CDS,
+    QCAMERA3_OPAQUE_RAW,
+    QCAMERA3_CROP,
+    QCAMERA3_TUNING_META_DATA,
+    QCAMERA3_AV_TIMER,
+    QCAMERA3_SENSOR_META_DATA,
+    QCAMERA3_TEMPORAL_DENOISE,
+    NEXUS_EXPERIMENTAL_2015,
+    QCAMERA3_SECTIONS_END
+};
+
+enum qcamera3_ext_section_ranges {
+    QCAMERA3_PRIVATEDATA_START = QCAMERA3_PRIVATEDATA << 16,
+    QCAMERA3_CDS_START = QCAMERA3_CDS << 16,
+    QCAMERA3_OPAQUE_RAW_START = QCAMERA3_OPAQUE_RAW << 16,
+    QCAMERA3_CROP_START = QCAMERA3_CROP << 16,
+    QCAMERA3_TUNING_META_DATA_START = QCAMERA3_TUNING_META_DATA << 16,
+    QCAMERA3_AV_TIMER_START = QCAMERA3_AV_TIMER << 16,
+    QCAMERA3_SENSOR_META_DATA_START = QCAMERA3_SENSOR_META_DATA << 16,
+    QCAMERA3_TEMPORAL_DENOISE_START = QCAMERA3_TEMPORAL_DENOISE << 16,
+    NEXUS_EXPERIMENTAL_2015_START = NEXUS_EXPERIMENTAL_2015 << 16
+};
+
+enum qcamera3_ext_tags {
+    QCAMERA3_PRIVATEDATA_REPROCESS = QCAMERA3_PRIVATEDATA_START,
+    QCAMERA3_PRIVATEDATA_END,
+    QCAMERA3_CDS_MODE = QCAMERA3_CDS_START,
+    QCAMERA3_CDS_INFO,
+    QCAMERA3_CDS_END,
+
+    //Property Name:  org.codeaurora.qcamera3.opaque_raw.opaque_raw_strides
+    //
+    //Type: int32 * n * 3 [public]
+    //
+    //Description: Distance in bytes from the beginning of one row of opaque
+    //raw image data to the beginning of next row.
+    //
+    //Details: The strides are listed as (raw_width, raw_height, stride)
+    //triplets. For each supported raw size, there will be a stride associated
+    //with it.
+    QCAMERA3_OPAQUE_RAW_STRIDES = QCAMERA3_OPAQUE_RAW_START,
+
+    //Property Name: org.codeaurora.qcamera3.opaque_raw.opaque_raw_format
+    //
+    //Type: byte(enum) [public]
+    //  * LEGACY - The legacy raw format where 8, 10, or 12-bit
+    //    raw data is packed into a 64-bit word.
+    //  * MIPI - raw format matching the data packing described
+    //    in MIPI CSI-2 specification. In memory, the data
+    //    is constructed by packing sequentially received pixels
+    //    into least significant parts of the words first.
+    //    Within each pixel, the least significant bits are also
+    //    placed towards the least significant part of the word.
+    //
+    //Details: Lay out of opaque raw data in memory is decided by two factors:
+    //         opaque_raw_format and bit depth (implied by whiteLevel). Below
+    //         list illustrates their relationship:
+    //  LEGACY8:  P7(7:0) P6(7:0) P5(7:0) P4(7:0) P3(7:0) P2(7:0) P1(7:0) P0(7:0)
+    //            8 pixels occupy 8 bytes, no padding needed
+    //            min_stride = CEILING8(raw_width)
+    // LEGACY10:  0000 P5(9:0) P4(9:0) P3(9:0) P2(9:0) P1(9:0) P0(9:0)
+    //            6 pixels occupy 8 bytes, 4 bits padding at MSB
+    //            min_stride = (raw_width+5)/6 * 8
+    // LEGACY12:  0000 P4(11:0) P3(11:0) P2(11:0) P1(11:0) P0(11:0)
+    //            5 pixels occupy 8 bytes, 4 bits padding at MSB
+    //            min_stride = (raw_width+4)/5 * 8
+    //    MIPI8:  P0(7:0)
+    //            1 pixel occupy 1 byte
+    //            min_stride = raw_width
+    //   MIPI10:  P3(1:0) P2(1:0) P1(1:0) P0(1:0) P3(9:2) P2(9:2) P1(9:2) P0(9:2)
+    //            4 pixels occupy 5 bytes
+    //            min_stride = (raw_width+3)/4 * 5
+    //   MIPI12:  P1(3:0) P0(3:0) P1(11:4) P0(11:4)
+    //            2 pixels occupy 3 bytes
+    //            min_stride = (raw_width+1)/2 * 3
+    //Note that opaque_raw_stride needs to be at least the required minimum
+    //stride from the table above. ISP hardware may need more generous stride
+    //setting. For example, for LEGACY8, the actual stride may be
+    //CEILING16(raw_width) due to bus burst length requirement.
+    QCAMERA3_OPAQUE_RAW_FORMAT,
+    QCAMERA3_OPAQUE_RAW_END,
+
+    QCAMERA3_CROP_COUNT_REPROCESS = QCAMERA3_CROP_START,
+    QCAMERA3_CROP_REPROCESS,
+    QCAMERA3_CROP_ROI_MAP_REPROCESS,
+    QCAMERA3_CROP_END,
+
+    QCAMERA3_TUNING_META_DATA_BLOB = QCAMERA3_TUNING_META_DATA_START,
+    QCAMERA3_TUNING_META_DATA_END,
+
+    QCAMERA3_USE_AV_TIMER = QCAMERA3_AV_TIMER_START,
+    QCAMERA3_AV_TIMER_END,
+
+    QCAMERA3_SENSOR_DYNAMIC_BLACK_LEVEL_PATTERN = QCAMERA3_SENSOR_META_DATA_START,
+    QCAMERA3_SENSOR_META_DATA_END,
+
+    QCAMERA3_TEMPORAL_DENOISE_ENABLE = QCAMERA3_TEMPORAL_DENOISE_START,
+    QCAMERA3_TEMPORAL_DENOISE_PROCESS_TYPE,
+    QCAMERA3_TEMPORAL_DENOISE_END,
+
+    NEXUS_EXPERIMENTAL_2015_SENSOR_DYNAMIC_BLACK_LEVEL = NEXUS_EXPERIMENTAL_2015_START,
+    NEXUS_EXPERIMENTAL_2015_SENSOR_INFO_OPTICALLY_SHIELDED_REGIONS,
+    NEXUS_EXPERIMENTAL_2015_END
+};
+
+// QCAMERA3_OPAQUE_RAW_FORMAT
+typedef enum qcamera3_ext_opaque_raw_format {
+    QCAMERA3_OPAQUE_RAW_FORMAT_LEGACY,
+    QCAMERA3_OPAQUE_RAW_FORMAT_MIPI
+} qcamera3_ext_opaque_raw_format_t;
+
+class QCamera3VendorTags {
+
+public:
+    static void get_vendor_tag_ops(vendor_tag_ops_t* ops);
+    static int get_tag_count(
+            const vendor_tag_ops_t *ops);
+    static void get_all_tags(
+            const vendor_tag_ops_t *ops,
+            uint32_t *tag_array);
+    static const char* get_section_name(
+            const vendor_tag_ops_t *ops,
+            uint32_t tag);
+    static const char* get_tag_name(
+            const vendor_tag_ops_t *ops,
+            uint32_t tag);
+    static int get_tag_type(
+            const vendor_tag_ops_t *ops,
+            uint32_t tag);
+
+    static const vendor_tag_ops_t *Ops;
+};
+
+}; // namespace qcamera
+
+#endif /* __QCAMERA3VENDORTAGS_H__ */
diff --git a/camera/QCamera2/QCamera2Factory.cpp b/camera/QCamera2/QCamera2Factory.cpp
new file mode 100644
index 0000000..676dd65
--- /dev/null
+++ b/camera/QCamera2/QCamera2Factory.cpp
@@ -0,0 +1,498 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#define LOG_TAG "QCamera2Factory"
+//#define LOG_NDEBUG 0
+
+#include <stdlib.h>
+#include <utils/Log.h>
+#include <utils/Errors.h>
+#include <hardware/camera.h>
+#include <hardware/camera3.h>
+
+#include "HAL/QCamera2HWI.h"
+#include "HAL3/QCamera3HWI.h"
+#include "util/QCameraFlash.h"
+#include "QCamera2Factory.h"
+
+using namespace android;
+
+namespace qcamera {
+
+QCamera2Factory *gQCamera2Factory = NULL;
+
+/*===========================================================================
+ * FUNCTION   : QCamera2Factory
+ *
+ * DESCRIPTION: default constructor of QCamera2Factory
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCamera2Factory::QCamera2Factory()
+{
+    camera_info info;
+    mHalDescriptors = NULL;
+    mCallbacks = NULL;
+    mNumOfCameras = get_num_of_cameras();
+    char prop[PROPERTY_VALUE_MAX];
+    property_get("persist.camera.HAL3.enabled", prop, "1");
+    int isHAL3Enabled = atoi(prop);
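+    // persist.camera.HAL3.enabled defaults to "1" here, so camera_device
+    // API v3.0 is advertised unless the property is explicitly set to 0.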
+
+    if ((mNumOfCameras > 0) && (mNumOfCameras <= MM_CAMERA_MAX_NUM_SENSORS)) {
+        mHalDescriptors = new hal_desc[mNumOfCameras];
+        if ( NULL != mHalDescriptors) {
+            uint32_t cameraId = 0;
+
+            for (int i = 0; i < mNumOfCameras ; i++, cameraId++) {
+                mHalDescriptors[i].cameraId = cameraId;
+                if (isHAL3Enabled) {
+                    mHalDescriptors[i].device_version = CAMERA_DEVICE_API_VERSION_3_0;
+                } else {
+                    mHalDescriptors[i].device_version = CAMERA_DEVICE_API_VERSION_1_0;
+                }
+                //Query camera at this point in order
+                //to avoid any delays during subsequent
+                //calls to 'getCameraInfo()'
+                getCameraInfo(i, &info);
+            }
+        } else {
+            ALOGE("%s: Not enough resources to allocate HAL descriptor table!",
+                  __func__);
+        }
+    } else {
+        ALOGE("%s: %d camera devices detected!", __func__, mNumOfCameras);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCamera2Factory
+ *
+ * DESCRIPTION: destructor of QCamera2Factory
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCamera2Factory::~QCamera2Factory()
+{
+    if ( NULL != mHalDescriptors ) {
+        delete [] mHalDescriptors;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : get_number_of_cameras
+ *
+ * DESCRIPTION: static function to query number of cameras detected
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : number of cameras detected
+ *==========================================================================*/
+int QCamera2Factory::get_number_of_cameras()
+{
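+    // The global factory singleton is created lazily on the first call; the
+    // other static wrappers in this file assume it has already been set up.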
+    if (!gQCamera2Factory) {
+        gQCamera2Factory = new QCamera2Factory();
+        if (!gQCamera2Factory) {
+            ALOGE("%s: Failed to allocate Camera2Factory object", __func__);
+            return 0;
+        }
+    }
+    return gQCamera2Factory->getNumberOfCameras();
+}
+
+/*===========================================================================
+ * FUNCTION   : get_camera_info
+ *
+ * DESCRIPTION: static function to query camera information with its ID
+ *
+ * PARAMETERS :
+ *   @camera_id : camera ID
+ *   @info      : ptr to camera info struct
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2Factory::get_camera_info(int camera_id, struct camera_info *info)
+{
+    return gQCamera2Factory->getCameraInfo(camera_id, info);
+}
+
+/*===========================================================================
+ * FUNCTION   : set_callbacks
+ *
+ * DESCRIPTION: static function to set callbacks function to camera module
+ *
+ * PARAMETERS :
+ *   @callbacks : ptr to callback functions
+ *
+ * RETURN     : NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2Factory::set_callbacks(const camera_module_callbacks_t *callbacks)
+{
+    return gQCamera2Factory->setCallbacks(callbacks);
+}
+
+/*===========================================================================
+ * FUNCTION   : open_legacy
+ *
+ * DESCRIPTION: Function to open older hal version implementation
+ *
+ * PARAMETERS :
+ *   @hw_device : ptr to struct storing camera hardware device info
+ *   @camera_id : camera ID
+ *   @halVersion: Based on camera_module_t.common.module_api_version
+ *
+ * RETURN     : 0  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2Factory::open_legacy(const struct hw_module_t* module,
+            const char* id, uint32_t halVersion, struct hw_device_t** device)
+{
+    if (module != &HAL_MODULE_INFO_SYM.common) {
+        ALOGE("Invalid module. Trying to open %p, expect %p",
+            module, &HAL_MODULE_INFO_SYM.common);
+        return INVALID_OPERATION;
+    }
+    if (!id) {
+        ALOGE("Invalid camera id");
+        return BAD_VALUE;
+    }
+    return gQCamera2Factory->openLegacy(atoi(id), halVersion, device);
+}
+
+/*===========================================================================
+ * FUNCTION   : set_torch_mode
+ *
+ * DESCRIPTION: Attempt to turn on or off the torch mode of the flash unit.
+ *
+ * PARAMETERS :
+ *   @camera_id : camera ID
+ *   @on        : Indicates whether to turn the flash on or off
+ *
+ * RETURN     : 0  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2Factory::set_torch_mode(const char* camera_id, bool on)
+{
+    return gQCamera2Factory->setTorchMode(camera_id, on);
+}
+
+/*===========================================================================
+ * FUNCTION   : getNumberOfCameras
+ *
+ * DESCRIPTION: query number of cameras detected
+ *
+ * PARAMETERS : none
+ *
+ * RETURN     : number of cameras detected
+ *==========================================================================*/
+int QCamera2Factory::getNumberOfCameras()
+{
+    return mNumOfCameras;
+}
+
+/*===========================================================================
+ * FUNCTION   : getCameraInfo
+ *
+ * DESCRIPTION: query camera information with its ID
+ *
+ * PARAMETERS :
+ *   @camera_id : camera ID
+ *   @info      : ptr to camera info struct
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2Factory::getCameraInfo(int camera_id, struct camera_info *info)
+{
+    int rc;
+    ALOGV("%s: E, camera_id = %d", __func__, camera_id);
+
+    if (!mNumOfCameras || camera_id >= mNumOfCameras || !info ||
+        (camera_id < 0)) {
+        return -ENODEV;
+    }
+
+    if ( NULL == mHalDescriptors ) {
+        ALOGE("%s : Hal descriptor table is not initialized!", __func__);
+        return NO_INIT;
+    }
+
+    if ( mHalDescriptors[camera_id].device_version == CAMERA_DEVICE_API_VERSION_3_0 ) {
+        rc = QCamera3HardwareInterface::getCamInfo(mHalDescriptors[camera_id].cameraId, info);
+    } else if (mHalDescriptors[camera_id].device_version == CAMERA_DEVICE_API_VERSION_1_0) {
+        rc = QCamera2HardwareInterface::getCapabilities(mHalDescriptors[camera_id].cameraId, info);
+    } else {
+        ALOGE("%s: Device version for camera id %d invalid %d",
+              __func__,
+              camera_id,
+              mHalDescriptors[camera_id].device_version);
+        return BAD_VALUE;
+    }
+
+    ALOGV("%s: X", __func__);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setCallbacks
+ *
+ * DESCRIPTION: set callback functions to send asynchronous notifications to
+ *              frameworks.
+ *
+ * PARAMETERS :
+ *   @callbacks : callback function pointer
+ *
+ * RETURN     :
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2Factory::setCallbacks(const camera_module_callbacks_t *callbacks)
+{
+    int rc = NO_ERROR;
+    mCallbacks = callbacks;
+
+    rc = QCameraFlash::getInstance().registerCallbacks(callbacks);
+    if (rc != 0) {
+        ALOGE("%s : Failed to register callbacks with flash module!", __func__);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : cameraDeviceOpen
+ *
+ * DESCRIPTION: open a camera device with its ID
+ *
+ * PARAMETERS :
+ *   @camera_id : camera ID
+ *   @hw_device : ptr to struct storing camera hardware device info
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2Factory::cameraDeviceOpen(int camera_id,
+                    struct hw_device_t **hw_device)
+{
+    int rc = NO_ERROR;
+    if (camera_id < 0 || camera_id >= mNumOfCameras)
+        return -ENODEV;
+
+    if ( NULL == mHalDescriptors ) {
+        ALOGE("%s : Hal descriptor table is not initialized!", __func__);
+        return NO_INIT;
+    }
+
+    if ( mHalDescriptors[camera_id].device_version == CAMERA_DEVICE_API_VERSION_3_0 ) {
+        QCamera3HardwareInterface *hw = new QCamera3HardwareInterface(mHalDescriptors[camera_id].cameraId,
+                mCallbacks);
+        if (!hw) {
+            ALOGE("Allocation of hardware interface failed");
+            return NO_MEMORY;
+        }
+        rc = hw->openCamera(hw_device);
+        if (rc != 0) {
+            delete hw;
+        }
+    } else if (mHalDescriptors[camera_id].device_version == CAMERA_DEVICE_API_VERSION_1_0) {
+        QCamera2HardwareInterface *hw = new QCamera2HardwareInterface((uint32_t)camera_id);
+        if (!hw) {
+            ALOGE("Allocation of hardware interface failed");
+            return NO_MEMORY;
+        }
+        rc = hw->openCamera(hw_device);
+        if (rc != NO_ERROR) {
+            delete hw;
+        }
+    } else {
+        ALOGE("%s: Device version for camera id %d invalid %d",
+              __func__,
+              camera_id,
+              mHalDescriptors[camera_id].device_version);
+        return BAD_VALUE;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : camera_device_open
+ *
+ * DESCRIPTION: static function to open a camera device by its ID
+ *
+ * PARAMETERS :
+ *   @camera_id : camera ID
+ *   @hw_device : ptr to struct storing camera hardware device info
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2Factory::camera_device_open(
+    const struct hw_module_t *module, const char *id,
+    struct hw_device_t **hw_device)
+{
+    if (module != &HAL_MODULE_INFO_SYM.common) {
+        ALOGE("Invalid module. Trying to open %p, expect %p",
+            module, &HAL_MODULE_INFO_SYM.common);
+        return INVALID_OPERATION;
+    }
+    if (!id) {
+        ALOGE("Invalid camera id");
+        return BAD_VALUE;
+    }
+    return gQCamera2Factory->cameraDeviceOpen(atoi(id), hw_device);
+}
+
+struct hw_module_methods_t QCamera2Factory::mModuleMethods = {
+    open: QCamera2Factory::camera_device_open,
+};
+
+/*===========================================================================
+ * FUNCTION   : openLegacy
+ *
+ * DESCRIPTION: Function to open older hal version implementation
+ *
+ * PARAMETERS :
+ *   @camera_id : camera ID
+ *   @halVersion: Based on camera_module_t.common.module_api_version
+ *   @hw_device : ptr to struct storing camera hardware device info
+ *
+ * RETURN     : 0  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2Factory::openLegacy(
+        int32_t cameraId, uint32_t halVersion, struct hw_device_t** hw_device)
+{
+    int rc = NO_ERROR;
+
+    ALOGI(":%s openLegacy halVersion: %d", __func__, halVersion);
+    //Assumption: all cameras can support legacy API version
+    if (cameraId < 0 || cameraId >= gQCamera2Factory->getNumberOfCameras())
+        return -ENODEV;
+
+    switch(halVersion)
+    {
+        case CAMERA_DEVICE_API_VERSION_1_0:
+        {
+            QCamera2HardwareInterface *hw =
+                new QCamera2HardwareInterface((uint32_t)cameraId);
+            if (!hw) {
+                ALOGE("%s: Allocation of hardware interface failed", __func__);
+                return NO_MEMORY;
+            }
+            rc = hw->openCamera(hw_device);
+            if (rc != NO_ERROR) {
+                delete hw;
+            }
+            break;
+        }
+        default:
+            ALOGE("%s: Device API version: %d for camera id %d invalid",
+                __func__, halVersion, cameraId);
+            return BAD_VALUE;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : setTorchMode
+ *
+ * DESCRIPTION: Attempt to turn on or off the torch mode of the flash unit.
+ *
+ * PARAMETERS :
+ *   @camera_id : camera ID
+ *   @on        : Indicates whether to turn the flash on or off
+ *
+ * RETURN     : 0  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int QCamera2Factory::setTorchMode(const char* camera_id, bool on)
+{
+    int retVal(0);
+    long cameraIdLong(-1);
+    int cameraIdInt(-1);
+    char* endPointer = NULL;
+    errno = 0;
+    QCameraFlash& flash = QCameraFlash::getInstance();
+
+    cameraIdLong = strtol(camera_id, &endPointer, 10);
+
+    if ((errno == ERANGE) ||
+            (cameraIdLong < 0) ||
+            (cameraIdLong >= static_cast<long>(get_number_of_cameras())) ||
+            (endPointer == camera_id) ||
+            (*endPointer != '\0')) {
+        retVal = -EINVAL;
+    } else if (on) {
+        cameraIdInt = static_cast<int>(cameraIdLong);
+        retVal = flash.initFlash(cameraIdInt);
+
+        if (retVal == 0) {
+            retVal = flash.setFlashMode(cameraIdInt, on);
+            if ((retVal == 0) && (mCallbacks != NULL)) {
+                mCallbacks->torch_mode_status_change(mCallbacks,
+                        camera_id,
+                        TORCH_MODE_STATUS_AVAILABLE_ON);
+            } else if (retVal == -EALREADY) {
+                // Flash is already on, so treat this as a success.
+                retVal = 0;
+            }
+        }
+    } else {
+        cameraIdInt = static_cast<int>(cameraIdLong);
+        retVal = flash.setFlashMode(cameraIdInt, on);
+
+        if (retVal == 0) {
+            retVal = flash.deinitFlash(cameraIdInt);
+            if ((retVal == 0) && (mCallbacks != NULL)) {
+                mCallbacks->torch_mode_status_change(mCallbacks,
+                        camera_id,
+                        TORCH_MODE_STATUS_AVAILABLE_OFF);
+            }
+        } else if (retVal == -EALREADY) {
+            // Flash is already off, so treat this as a success.
+            retVal = 0;
+        }
+    }
+
+    return retVal;
+}
+
+}; // namespace qcamera
+
diff --git a/camera/QCamera2/QCamera2Factory.h b/camera/QCamera2/QCamera2Factory.h
new file mode 100644
index 0000000..7090eaf
--- /dev/null
+++ b/camera/QCamera2/QCamera2Factory.h
@@ -0,0 +1,80 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#ifndef __QCAMERA2FACTORY_H__
+#define __QCAMERA2FACTORY_H__
+
+#include <hardware/camera.h>
+#include <system/camera.h>
+#include <hardware/camera3.h>
+
+namespace qcamera {
+
+typedef struct {
+    uint32_t cameraId;
+    uint32_t device_version;
+} hal_desc;
+
+class QCamera2Factory
+{
+public:
+    QCamera2Factory();
+    virtual ~QCamera2Factory();
+
+    static int get_number_of_cameras();
+    static int get_camera_info(int camera_id, struct camera_info *info);
+    static int set_callbacks(const camera_module_callbacks_t *callbacks);
+    static int open_legacy(const struct hw_module_t* module,
+            const char* id, uint32_t halVersion, struct hw_device_t** device);
+    static int set_torch_mode(const char* camera_id, bool on);
+
+private:
+    int getNumberOfCameras();
+    int getCameraInfo(int camera_id, struct camera_info *info);
+    int setCallbacks(const camera_module_callbacks_t *callbacks);
+    int cameraDeviceOpen(int camera_id, struct hw_device_t **hw_device);
+    static int camera_device_open(const struct hw_module_t *module, const char *id,
+                struct hw_device_t **hw_device);
+    static int openLegacy(
+            int32_t cameraId, uint32_t halVersion, struct hw_device_t** hw_device);
+    int setTorchMode(const char* camera_id, bool on);
+public:
+    static struct hw_module_methods_t mModuleMethods;
+
+private:
+    int mNumOfCameras;
+    hal_desc *mHalDescriptors;
+    const camera_module_callbacks_t *mCallbacks;
+};
+
+}; /*namespace qcamera*/
+
+extern camera_module_t HAL_MODULE_INFO_SYM;
+
+#endif /* __QCAMERA2FACTORY_H__ */
diff --git a/camera/QCamera2/QCamera2Hal.cpp b/camera/QCamera2/QCamera2Hal.cpp
new file mode 100755
index 0000000..81fbc9b
--- /dev/null
+++ b/camera/QCamera2/QCamera2Hal.cpp
@@ -0,0 +1,55 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#include "QCamera2Factory.h"
+#include "HAL3/QCamera3VendorTags.h"
+
+static hw_module_t camera_common = {
+    .tag = HARDWARE_MODULE_TAG,
+    .module_api_version = CAMERA_MODULE_API_VERSION_2_4,
+    .hal_api_version = HARDWARE_HAL_API_VERSION,
+    .id = CAMERA_HARDWARE_MODULE_ID,
+    .name = "QCamera Module",
+    .author = "Qualcomm Innovation Center Inc",
+    .methods = &qcamera::QCamera2Factory::mModuleMethods,
+    .dso = NULL,
+    .reserved = {0}
+};
+
+camera_module_t HAL_MODULE_INFO_SYM = {
+    .common = camera_common,
+    .get_number_of_cameras = qcamera::QCamera2Factory::get_number_of_cameras,
+    .get_camera_info = qcamera::QCamera2Factory::get_camera_info,
+    .set_callbacks = qcamera::QCamera2Factory::set_callbacks,
+    .get_vendor_tag_ops = qcamera::QCamera3VendorTags::get_vendor_tag_ops,
+    .open_legacy = qcamera::QCamera2Factory::open_legacy,
+    .set_torch_mode = qcamera::QCamera2Factory::set_torch_mode,
+    .init  = NULL,
+    .reserved = {0}
+};
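
The module table above only wires the QCamera2Factory entry points into HAL_MODULE_INFO_SYM; a short sketch of how a client would typically reach them may help. It assumes the standard hw_get_module() loader from libhardware and a hypothetical list_cameras() helper.

    #include <stdio.h>
    #include <hardware/hardware.h>
    #include <hardware/camera_common.h>

    /* Enumerate cameras through the module loaded from HAL_MODULE_INFO_SYM. */
    int list_cameras(void)
    {
        const hw_module_t *hw = NULL;
        int rc = hw_get_module(CAMERA_HARDWARE_MODULE_ID, &hw);
        if (rc != 0)
            return rc;

        const camera_module_t *module = (const camera_module_t *)hw;
        int num = module->get_number_of_cameras();

        for (int i = 0; i < num; i++) {
            struct camera_info info;
            if (module->get_camera_info(i, &info) == 0) {
                printf("camera %d: facing=%d orientation=%d api=0x%x\n",
                       i, info.facing, info.orientation,
                       (unsigned)info.device_version);
            }
        }
        return 0;
    }
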
diff --git a/camera/QCamera2/stack/Android.mk b/camera/QCamera2/stack/Android.mk
new file mode 100644
index 0000000..a357417
--- /dev/null
+++ b/camera/QCamera2/stack/Android.mk
@@ -0,0 +1,5 @@
+LOCAL_PATH:= $(call my-dir)
+include $(LOCAL_PATH)/mm-camera-interface/Android.mk
+include $(LOCAL_PATH)/mm-jpeg-interface/Android.mk
+include $(LOCAL_PATH)/mm-jpeg-interface/test/Android.mk
+include $(LOCAL_PATH)/mm-camera-test/Android.mk
diff --git a/camera/QCamera2/stack/common/cam_intf.h b/camera/QCamera2/stack/common/cam_intf.h
new file mode 100644
index 0000000..52682c3
--- /dev/null
+++ b/camera/QCamera2/stack/common/cam_intf.h
@@ -0,0 +1,849 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCAMERA_INTF_H__
+#define __QCAMERA_INTF_H__
+
+#include <string.h>
+#include <media/msmb_isp.h>
+#include "cam_types.h"
+
+#define CAM_PRIV_IOCTL_BASE (V4L2_CID_PRIVATE_BASE + 14)
+typedef enum {
+    /* session based parameters */
+    CAM_PRIV_PARM = CAM_PRIV_IOCTL_BASE,
+    /* session based action: do auto focus.*/
+    CAM_PRIV_DO_AUTO_FOCUS,
+    /* session based action: cancel auto focus.*/
+    CAM_PRIV_CANCEL_AUTO_FOCUS,
+    /* session based action: prepare for snapshot.*/
+    CAM_PRIV_PREPARE_SNAPSHOT,
+    /* sync stream info.*/
+    CAM_PRIV_STREAM_INFO_SYNC,
+    /* stream based parameters*/
+    CAM_PRIV_STREAM_PARM,
+    /* start ZSL snapshot.*/
+    CAM_PRIV_START_ZSL_SNAPSHOT,
+    /* stop ZSL snapshot.*/
+    CAM_PRIV_STOP_ZSL_SNAPSHOT,
+} cam_private_ioctl_enum_t;
+
+/* capability struct definition for HAL 1*/
+typedef struct{
+    cam_hal_version_t version;
+
+    cam_position_t position;                                /* sensor position: front, back */
+
+    uint8_t auto_hdr_supported;
+
+    uint16_t isWnrSupported;
+    /* supported iso modes */
+    size_t supported_iso_modes_cnt;
+    cam_iso_mode_type supported_iso_modes[CAM_ISO_MODE_MAX];
+
+    /* supported flash modes */
+    size_t supported_flash_modes_cnt;
+    cam_flash_mode_t supported_flash_modes[CAM_FLASH_MODE_MAX];
+
+    size_t zoom_ratio_tbl_cnt;                              /* table size for zoom ratios */
+    uint32_t zoom_ratio_tbl[MAX_ZOOMS_CNT];                 /* zoom ratios table */
+
+    /* supported effect modes */
+    size_t supported_effects_cnt;
+    cam_effect_mode_type supported_effects[CAM_EFFECT_MODE_MAX];
+
+    /* supported scene modes */
+    size_t supported_scene_modes_cnt;
+    cam_scene_mode_type supported_scene_modes[CAM_SCENE_MODE_MAX];
+
+    /* supported auto exposure modes */
+    size_t supported_aec_modes_cnt;
+    cam_auto_exposure_mode_type supported_aec_modes[CAM_AEC_MODE_MAX];
+
+    size_t fps_ranges_tbl_cnt;                              /* fps ranges table size */
+    cam_fps_range_t fps_ranges_tbl[MAX_SIZES_CNT];          /* fps ranges table */
+
+    /* supported antibanding modes */
+    size_t supported_antibandings_cnt;
+    cam_antibanding_mode_type supported_antibandings[CAM_ANTIBANDING_MODE_MAX];
+
+    /* supported white balance modes */
+    size_t supported_white_balances_cnt;
+    cam_wb_mode_type supported_white_balances[CAM_WB_MODE_MAX];
+
+    /* supported manual wb cct */
+    int32_t min_wb_cct;
+    int32_t max_wb_cct;
+
+    /* supported manual wb rgb gains */
+    float min_wb_gain;
+    float max_wb_gain;
+
+    /* supported focus modes */
+    size_t supported_focus_modes_cnt;
+    cam_focus_mode_type supported_focus_modes[CAM_FOCUS_MODE_MAX];
+
+    /* supported manual focus position */
+    float min_focus_pos[CAM_MANUAL_FOCUS_MODE_MAX];
+    float max_focus_pos[CAM_MANUAL_FOCUS_MODE_MAX];
+
+    int32_t exposure_compensation_min;       /* min value of exposure compensation index */
+    int32_t exposure_compensation_max;       /* max value of exposure compensation index */
+    int32_t exposure_compensation_default;   /* default value of exposure compensation index */
+    float exposure_compensation_step;
+    cam_rational_type_t exp_compensation_step;    /* exposure compensation step value */
+
+    uint8_t video_stablization_supported; /* flag if video stabilization is supported */
+
+    size_t picture_sizes_tbl_cnt;                           /* picture sizes table size */
+    cam_dimension_t picture_sizes_tbl[MAX_SIZES_CNT];       /* picture sizes table */
+    /* The minimum frame duration that is supported for each
+     * resolution in availableProcessedSizes. Should correspond
+     * to the frame duration when only that processed stream
+     * is active, with all processing set to FAST */
+    int64_t picture_min_duration[MAX_SIZES_CNT];
+
+    /* capabilities specific to HAL 1 */
+
+    int32_t modes_supported;                                /* mask of modes supported: 2D, 3D */
+    uint32_t sensor_mount_angle;                            /* sensor mount angle */
+
+    float focal_length;                                     /* focal length */
+    float hor_view_angle;                                   /* horizontal view angle */
+    float ver_view_angle;                                   /* vertical view angle */
+
+    size_t preview_sizes_tbl_cnt;                           /* preview sizes table size */
+    cam_dimension_t preview_sizes_tbl[MAX_SIZES_CNT];       /* preview sizes table */
+
+    size_t video_sizes_tbl_cnt;                             /* video sizes table size */
+    cam_dimension_t video_sizes_tbl[MAX_SIZES_CNT];         /* video sizes table */
+
+
+    size_t livesnapshot_sizes_tbl_cnt;                      /* livesnapshot sizes table size */
+    cam_dimension_t livesnapshot_sizes_tbl[MAX_SIZES_CNT];  /* livesnapshot sizes table */
+
+    size_t hfr_tbl_cnt;                                     /* table size for HFR */
+    cam_hfr_info_t hfr_tbl[CAM_HFR_MODE_MAX];               /* HFR table */
+
+    /* supported preview formats */
+    size_t supported_preview_fmt_cnt;
+    cam_format_t supported_preview_fmts[CAM_FORMAT_MAX];
+
+    /* supported picture formats */
+    size_t supported_picture_fmt_cnt;
+    cam_format_t supported_picture_fmts[CAM_FORMAT_MAX];
+
+    uint8_t max_downscale_factor;
+
+    /* dimension and supported output format of raw dump from camif */
+    size_t supported_raw_dim_cnt;
+    cam_dimension_t raw_dim[MAX_SIZES_CNT];
+    size_t supported_raw_fmt_cnt;
+    cam_format_t supported_raw_fmts[CAM_FORMAT_MAX];
+    /* The minimum frame duration that is supported for above
+       raw resolution */
+    int64_t raw_min_duration[MAX_SIZES_CNT];
+
+    /* 3A version*/
+    cam_q3a_version_t q3a_version;
+    /* supported focus algorithms */
+    size_t supported_focus_algos_cnt;
+    cam_focus_algorithm_type supported_focus_algos[CAM_FOCUS_ALGO_MAX];
+
+
+    uint8_t auto_wb_lock_supported;       /* flag if auto white balance lock is supported */
+    uint8_t zoom_supported;               /* flag if zoom is supported */
+    uint8_t smooth_zoom_supported;        /* flag if smooth zoom is supported */
+    uint8_t auto_exposure_lock_supported; /* flag if auto exposure lock is supported */
+    uint8_t video_snapshot_supported;     /* flag if video snapshot is supported */
+
+    uint8_t max_num_roi;                  /* max number of roi can be detected */
+    uint8_t max_num_focus_areas;          /* max num of focus areas */
+    uint8_t max_num_metering_areas;       /* max num of metering areas */
+    uint8_t max_zoom_step;                /* max zoom step value */
+
+    /* QCOM specific control */
+    cam_control_range_t brightness_ctrl;  /* brightness */
+    cam_control_range_t sharpness_ctrl;   /* sharpness */
+    cam_control_range_t contrast_ctrl;    /* contrast */
+    cam_control_range_t saturation_ctrl;  /* saturation */
+    cam_control_range_t sce_ctrl;         /* skintone enhancement factor */
+
+    /* QCOM HDR specific control. Indicates number of frames and exposure needs for the frames */
+    cam_hdr_bracketing_info_t hdr_bracketing_setting;
+
+    uint32_t qcom_supported_feature_mask; /* mask of qcom specific features supported:
+                                           * such as CAM_QCOM_FEATURE_SUPPORTED_FACE_DETECTION*/
+    cam_padding_info_t padding_info;      /* padding information from PP */
+    uint32_t min_num_pp_bufs;             /* minimum number of buffers needed by postproc module */
+    uint32_t min_required_pp_mask;        /* min required pp feature masks for ZSL.
+                                           * depends on hardware limitation, i.e. for 8974,
+                                           * sharpness is required for all ZSL snapshot frames */
+    cam_format_t rdi_mode_stream_fmt;  /* stream format supported in rdi mode */
+
+    /* capabilities specific to HAL 3 */
+
+    float min_focus_distance;
+    float hyper_focal_distance;
+
+    float focal_lengths[CAM_FOCAL_LENGTHS_MAX];
+    uint8_t focal_lengths_count;
+
+    /* Needs to be regular f number instead of APEX */
+    float apertures[CAM_APERTURES_MAX];
+    uint8_t apertures_count;
+
+    float filter_densities[CAM_FILTER_DENSITIES_MAX];
+    uint8_t filter_densities_count;
+
+    uint8_t optical_stab_modes[CAM_OPT_STAB_MAX];
+    uint8_t optical_stab_modes_count;
+
+    cam_dimension_t lens_shading_map_size;
+    float lens_shading_map[3 * CAM_MAX_MAP_WIDTH *
+              CAM_MAX_MAP_HEIGHT];
+
+    cam_dimension_t geo_correction_map_size;
+    float geo_correction_map[2 * 3 * CAM_MAX_MAP_WIDTH *
+              CAM_MAX_MAP_HEIGHT];
+
+    float lens_position[3];
+
+    /* nano seconds */
+    int64_t exposure_time_range[EXPOSURE_TIME_RANGE_CNT];
+
+    /* nano seconds */
+    int64_t max_frame_duration;
+
+    cam_color_filter_arrangement_t color_arrangement;
+    uint8_t num_color_channels;
+
+    /* parameters required to calculate S and O co-efficients */
+    double gradient_S;
+    double offset_S;
+    double gradient_O;
+    double offset_O;
+
+    float sensor_physical_size[SENSOR_PHYSICAL_SIZE_CNT];
+
+    /* Dimensions of full pixel array, possibly including
+       black calibration pixels */
+    cam_dimension_t pixel_array_size;
+    /* Area of raw data which corresponds to only active
+       pixels; smaller or equal to pixelArraySize. */
+    cam_rect_t active_array_size;
+
+    /* Maximum raw value output by sensor */
+    int32_t white_level;
+
+    /* A fixed black level offset for each of the Bayer
+       mosaic channels */
+    int32_t black_level_pattern[BLACK_LEVEL_PATTERN_CNT];
+
+    /* Time taken before flash can fire again in nano secs */
+    int64_t flash_charge_duration;
+
+    /* flash firing power */
+    size_t supported_flash_firing_level_cnt;
+    cam_format_t supported_firing_levels[CAM_FLASH_FIRING_LEVEL_MAX];
+
+    /* Flash Firing Time */
+    int64_t flash_firing_time;
+
+    /* Flash Color Temperature */
+    uint8_t flash_color_temp;
+
+    /* Flash max Energy */
+    uint8_t flash_max_energy;
+
+    /* Maximum number of supported points in the tonemap
+       curve */
+    int32_t max_tone_map_curve_points;
+
+    /* supported formats */
+    size_t supported_scalar_format_cnt;
+    cam_format_t supported_scalar_fmts[CAM_FORMAT_MAX];
+
+    uint32_t max_face_detection_count;
+    uint8_t hw_analysis_supported;
+
+    uint8_t histogram_supported;
+    /* Number of histogram buckets supported */
+    int32_t histogram_size;
+    /* Maximum value possible for a histogram bucket */
+    int32_t max_histogram_count;
+
+    cam_dimension_t sharpness_map_size;
+
+    /* Maximum value possible for a sharpness map region */
+    int32_t max_sharpness_map_value;
+
+    /*Autoexposure modes for camera 3 api*/
+    size_t supported_ae_modes_cnt;
+    cam_ae_mode_type supported_ae_modes[CAM_AE_MODE_MAX];
+
+
+    cam_sensitivity_range_t sensitivity_range;
+    int32_t max_analog_sensitivity;
+
+    /* picture sizes that need scaling */
+    cam_scene_mode_overrides_t scene_mode_overrides[CAM_SCENE_MODE_MAX];
+    size_t scale_picture_sizes_cnt;
+    cam_dimension_t scale_picture_sizes[MAX_SCALE_SIZES_CNT];
+
+    uint8_t flash_available;
+
+    cam_rational_type_t base_gain_factor;    /* sensor base gain factor */
+    /* AF Bracketing info */
+    cam_af_bracketing_t  ubifocus_af_bracketing_need;
+    cam_af_bracketing_t  refocus_af_bracketing_need;
+    /* opti Zoom info */
+    cam_opti_zoom_t      opti_zoom_settings_need;
+    /* still more info */
+    cam_still_more_t  stillmore_settings_need;
+    /* chroma flash info */
+    cam_chroma_flash_t chroma_flash_settings_need;
+
+    cam_rational_type_t forward_matrix[3][3];
+    cam_rational_type_t color_transform[3][3];
+
+    uint8_t focus_dist_calibrated;
+    uint8_t supported_test_pattern_modes_cnt;
+    cam_test_pattern_mode_t supported_test_pattern_modes[MAX_TEST_PATTERN_CNT];
+
+    int64_t stall_durations[MAX_SIZES_CNT];
+
+    cam_illuminat_t reference_illuminant1;
+    cam_illuminat_t reference_illuminant2;
+
+    int64_t jpeg_stall_durations[MAX_SIZES_CNT];
+    int64_t raw16_stall_durations[MAX_SIZES_CNT];
+    cam_rational_type_t forward_matrix1[FORWARD_MATRIX_ROWS][FORWARD_MATRIX_COLS];
+    cam_rational_type_t forward_matrix2[FORWARD_MATRIX_ROWS][FORWARD_MATRIX_COLS];
+    cam_rational_type_t color_transform1[COLOR_TRANSFORM_ROWS][COLOR_TRANSFORM_COLS];
+    cam_rational_type_t color_transform2[COLOR_TRANSFORM_ROWS][COLOR_TRANSFORM_COLS];
+    cam_rational_type_t calibration_transform1[CAL_TRANSFORM_ROWS][CAL_TRANSFORM_COLS];
+    cam_rational_type_t calibration_transform2[CAL_TRANSFORM_ROWS][CAL_TRANSFORM_COLS];
+    uint16_t isCacSupported;
+
+    cam_opaque_raw_format_t opaque_raw_fmt;
+
+    /* true Portrait info */
+    cam_true_portrait_t  true_portrait_settings_need;
+
+    /* Sensor type information */
+    cam_sensor_type_t sensor_type;
+
+    cam_aberration_mode_t aberration_modes[CAM_COLOR_CORRECTION_ABERRATION_MAX];
+    uint32_t aberration_modes_count;
+
+    /* Can the sensor timestamp be compared to
+     * timestamps from other sub-systems (gyro, accelerometer etc.) */
+    uint8_t isTimestampCalibrated;
+
+    /* Analysis stream max supported size */
+    cam_dimension_t analysis_max_res;
+    /* Analysis stream padding info */
+    cam_padding_info_t analysis_padding_info;
+    /* Max size supported by ISP viewfinder path */
+    cam_dimension_t max_viewfinder_size;
+
+    /* Analysis recommended size */
+    cam_dimension_t analysis_recommended_res;
+
+    /* Analysis recommended format */
+    cam_format_t analysis_recommended_format;
+
+    /* This is set to 'true' if sensor cannot guarantee per frame control */
+    /* Default value of this capability is 'false' indicating per-frame */
+    /* control is supported */
+    uint8_t no_per_frame_control_support;
+
+    /* EIS information */
+    uint8_t supported_is_types_cnt;
+    uint32_t supported_is_types[IS_TYPE_MAX];
+    /*for each type, specify the margin needed. Margin will be
+      the decimal representation of a percentage
+      ex: 10% margin = 0.1 */
+    float supported_is_type_margins[IS_TYPE_MAX];
+
+    /* Max cpp batch size */
+    uint8_t max_batch_bufs_supported;
+    uint8_t flash_dev_name[QCAMERA_MAX_FILEPATH_LENGTH];
+    uint8_t eeprom_version_info[MAX_EEPROM_VERSION_INFO_LEN];
+
+    /* maximum pixel bandwidth shared between cameras */
+    uint64_t max_pixel_bandwidth;
+
+    /* Array of K integers, where K%4==0,
+      as a list of rectangles in the pixelArray co-ord system
+      left, top, right, bottom */
+    int32_t optical_black_regions[MAX_OPTICAL_BLACK_REGIONS * 4];
+    /* Count is K/4 */
+    uint8_t optical_black_region_count;
+
+} cam_capability_t;
+
+typedef enum {
+    CAM_STREAM_PARAM_TYPE_DO_REPROCESS = CAM_INTF_PARM_DO_REPROCESS,
+    CAM_STREAM_PARAM_TYPE_SET_BUNDLE_INFO = CAM_INTF_PARM_SET_BUNDLE,
+    CAM_STREAM_PARAM_TYPE_SET_FLIP = CAM_INTF_PARM_STREAM_FLIP,
+    CAM_STREAM_PARAM_TYPE_GET_OUTPUT_CROP = CAM_INTF_PARM_GET_OUTPUT_CROP,
+    CAM_STREAM_PARAM_TYPE_GET_IMG_PROP = CAM_INTF_PARM_GET_IMG_PROP,
+    CAM_STREAM_PARAM_TYPE_MAX
+} cam_stream_param_type_e;
+
+typedef struct {
+    uint32_t buf_index;           /* buf index to the source frame buffer that needs reprocess,
+                                    (assume buffer is already mapped)*/
+    uint32_t frame_idx;           /* frame id of source frame to be reprocessed */
+    int32_t ret_val;              /* return value from reprocess. Could have different meanings.
+                                     i.e., faceID in the case of face registration. */
+    uint8_t meta_present;         /* if there is meta data associated with this reprocess frame */
+    uint32_t meta_stream_handle;  /* meta data stream ID. only valid if meta_present != 0 */
+    uint32_t meta_buf_index;      /* buf index to meta data buffer. only valid if meta_present != 0 */
+
+    /* opaque metadata required for reprocessing */
+    int32_t private_data[MAX_METADATA_PRIVATE_PAYLOAD_SIZE_IN_BYTES];
+    cam_rect_t crop_rect;
+} cam_reprocess_param;
+
+typedef struct {
+    uint32_t flip_mask;
+} cam_flip_mode_t;
+
+#define IMG_NAME_SIZE 32
+typedef struct {
+    cam_rect_t crop;  /* crop info for the image */
+    cam_dimension_t input; /* input dimension of the image */
+    cam_dimension_t output; /* output dimension of the image */
+    char name[IMG_NAME_SIZE]; /* optional name of the ext*/
+    cam_format_t format; /* image format */
+} cam_stream_img_prop_t;
+
+typedef struct {
+    cam_stream_param_type_e type;
+    union {
+        cam_reprocess_param reprocess;  /* do reprocess */
+        cam_bundle_config_t bundleInfo; /* set bundle info*/
+        cam_flip_mode_t flipInfo;       /* flip mode */
+        cam_crop_data_t outputCrop;     /* output crop for current frame */
+        cam_stream_img_prop_t imgProp;  /* image properties of current frame */
+    };
+} cam_stream_parm_buffer_t;
+
+/* stream info */
+typedef struct {
+    /* stream ID from server */
+    uint32_t stream_svr_id;
+
+    /* stream type */
+    cam_stream_type_t stream_type;
+
+    /* image format */
+    cam_format_t fmt;
+
+    /* image dimension */
+    cam_dimension_t dim;
+
+    /* buffer plane information, will be calc based on stream_type, fmt,
+       dim, and padding_info(from stream config). Info including:
+       offset_x, offset_y, stride, scanline, plane offset */
+    cam_stream_buf_plane_info_t buf_planes;
+
+    /* number of stream bufs will be allocated */
+    uint32_t num_bufs;
+
+    /* streaming type */
+    cam_streaming_mode_t streaming_mode;
+
+    /* num of frames needs to be generated.
+     * only valid when streaming_mode = CAM_STREAMING_MODE_BURST */
+    uint8_t num_of_burst;
+
+    /* num of frames in one batch.
+     * only valid when streaming_mode = CAM_STREAMING_MODE_BATCH */
+    cam_stream_user_buf_info_t user_buf_info;
+
+    /* stream specific pp config */
+    cam_pp_feature_config_t pp_config;
+
+    /* this section is valid if offline reprocess type stream */
+    cam_stream_reproc_config_t reprocess_config;
+
+    cam_stream_parm_buffer_t parm_buf;    /* stream based parameters */
+
+    uint8_t dis_enable;
+
+    /* Image Stabilization type */
+    cam_is_type_t is_type;
+
+    /* Signifies Secure stream mode */
+    cam_stream_secure_t is_secure;
+
+    /* Preferred Performance mode */
+    cam_perf_mode_t perf_mode;
+
+    /* if frames will not be received */
+    uint8_t noFrameExpected;
+} cam_stream_info_t;
+
+/*****************************************************************************
+ *                 Code for Domain Socket Based Parameters                   *
+ ****************************************************************************/
+#define INCLUDE(PARAM_ID,DATATYPE,COUNT)  \
+        DATATYPE member_variable_##PARAM_ID[ COUNT ]
+
+#define POINTER_OF_META(META_ID, TABLE_PTR) \
+        ((NULL != TABLE_PTR) ? \
+            (&TABLE_PTR->data.member_variable_##META_ID[ 0 ]) : (NULL))
+
+#define SIZE_OF_PARAM(META_ID, TABLE_PTR) \
+        sizeof(TABLE_PTR->data.member_variable_##META_ID)
+
+#define IF_META_AVAILABLE(META_TYPE, META_PTR_NAME, META_ID, TABLE_PTR) \
+        META_TYPE *META_PTR_NAME = \
+        (((NULL != TABLE_PTR) && (TABLE_PTR->is_valid[META_ID])) ? \
+            (&TABLE_PTR->data.member_variable_##META_ID[ 0 ]) : \
+            (NULL)); \
+        if (NULL != META_PTR_NAME) \
+
+#define ADD_SET_PARAM_ENTRY_TO_BATCH(TABLE_PTR, META_ID, DATA) \
+    ((NULL != TABLE_PTR) ? \
+    ((TABLE_PTR->data.member_variable_##META_ID[ 0 ] = DATA), \
+    (TABLE_PTR->is_valid[META_ID] = 1), (0)) : \
+    ((ALOGE("%s: %d, Unable to set metadata TABLE_PTR:%p META_ID:%d", \
+    __func__, __LINE__, TABLE_PTR, META_ID)), (-1))) \
+
+#define ADD_SET_PARAM_ARRAY_TO_BATCH(TABLE_PTR, META_ID, PDATA, COUNT, RCOUNT) \
+{ \
+    if ((NULL != TABLE_PTR) && \
+            (0 < COUNT) && \
+            ((sizeof(TABLE_PTR->data.member_variable_##META_ID) / \
+            sizeof(TABLE_PTR->data.member_variable_##META_ID[ 0 ])) \
+            >= COUNT))  { \
+        for (size_t _i = 0; _i < COUNT ; _i++) { \
+            TABLE_PTR->data.member_variable_##META_ID[ _i ] = PDATA [ _i ]; \
+        } \
+        TABLE_PTR->is_valid[META_ID] = 1; \
+        RCOUNT = COUNT; \
+    } else { \
+        ALOGE("%s: %d, Unable to set metadata TABLE_PTR:%p META_ID:%d COUNT:%zu", \
+                __func__, __LINE__, TABLE_PTR, META_ID, COUNT); \
+        RCOUNT = 0; \
+    } \
+}
+
+#define ADD_GET_PARAM_ENTRY_TO_BATCH(TABLE_PTR, META_ID) \
+{ \
+    if (NULL != TABLE_PTR) { \
+        TABLE_PTR->is_reqd[META_ID] = 1; \
+    } else { \
+        ALOGE("%s: %d, Unable to get metadata TABLE_PTR:%p META_ID:%d", \
+                __func__, __LINE__, TABLE_PTR, META_ID); \
+    } \
+}
+
+#define READ_PARAM_ENTRY(TABLE_PTR, META_ID, DATA) \
+{ \
+    if (NULL != TABLE_PTR) { \
+        DATA = TABLE_PTR->data.member_variable_##META_ID[ 0 ]; \
+    } else { \
+        ALOGE("%s: %d, Unable to read metadata TABLE_PTR:%p META_ID:%d", \
+                __func__, __LINE__, TABLE_PTR, META_ID); \
+    } \
+}
+
+typedef struct {
+/**************************************************************************************
+ *  ID from (cam_intf_metadata_type_t)                DATATYPE                     COUNT
+ **************************************************************************************/
+    /* common between HAL1 and HAL3 */
+    INCLUDE(CAM_INTF_META_HISTOGRAM,                    cam_hist_stats_t,               1);
+    INCLUDE(CAM_INTF_META_FACE_DETECTION,               cam_face_detection_data_t,      1);
+    INCLUDE(CAM_INTF_META_AUTOFOCUS_DATA,               cam_auto_focus_data_t,          1);
+    INCLUDE(CAM_INTF_META_CDS_DATA,                     cam_cds_data_t,                 1);
+    INCLUDE(CAM_INTF_PARM_UPDATE_DEBUG_LEVEL,           uint32_t,                       1);
+
+    /* Specific to HAl1 */
+    INCLUDE(CAM_INTF_META_CROP_DATA,                    cam_crop_data_t,                1);
+    INCLUDE(CAM_INTF_META_PREP_SNAPSHOT_DONE,           int32_t,                        1);
+    INCLUDE(CAM_INTF_META_GOOD_FRAME_IDX_RANGE,         cam_frame_idx_range_t,          1);
+    INCLUDE(CAM_INTF_META_ASD_HDR_SCENE_DATA,           cam_asd_hdr_scene_data_t,       1);
+    INCLUDE(CAM_INTF_META_ASD_SCENE_TYPE,               int32_t,                        1);
+    INCLUDE(CAM_INTF_META_CURRENT_SCENE,                cam_scene_mode_type,            1);
+    INCLUDE(CAM_INTF_META_AWB_INFO,                     cam_awb_params_t,               1);
+    INCLUDE(CAM_INTF_META_FOCUS_POSITION,               cam_focus_pos_info_t,           1);
+    INCLUDE(CAM_INTF_META_CHROMATIX_LITE_ISP,           cam_chromatix_lite_isp_t,       1);
+    INCLUDE(CAM_INTF_META_CHROMATIX_LITE_PP,            cam_chromatix_lite_pp_t,        1);
+    INCLUDE(CAM_INTF_META_CHROMATIX_LITE_AE,            cam_chromatix_lite_ae_stats_t,  1);
+    INCLUDE(CAM_INTF_META_CHROMATIX_LITE_AWB,           cam_chromatix_lite_awb_stats_t, 1);
+    INCLUDE(CAM_INTF_META_CHROMATIX_LITE_AF,            cam_chromatix_lite_af_stats_t,  1);
+    INCLUDE(CAM_INTF_META_CHROMATIX_LITE_ASD,           cam_chromatix_lite_asd_stats_t, 1);
+    INCLUDE(CAM_INTF_BUF_DIVERT_INFO,                   cam_buf_divert_info_t,          1);
+
+    /* Specific to HAL3 */
+    INCLUDE(CAM_INTF_META_FRAME_NUMBER_VALID,           int32_t,                     1);
+    INCLUDE(CAM_INTF_META_URGENT_FRAME_NUMBER_VALID,    int32_t,                     1);
+    INCLUDE(CAM_INTF_META_FRAME_DROPPED,                cam_frame_dropped_t,         1);
+    INCLUDE(CAM_INTF_META_FRAME_NUMBER,                 uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_URGENT_FRAME_NUMBER,          uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_COLOR_CORRECT_MODE,           uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_COLOR_CORRECT_TRANSFORM,      cam_color_correct_matrix_t,  1);
+    INCLUDE(CAM_INTF_META_COLOR_CORRECT_GAINS,          cam_color_correct_gains_t,   1);
+    INCLUDE(CAM_INTF_META_PRED_COLOR_CORRECT_TRANSFORM, cam_color_correct_matrix_t,  1);
+    INCLUDE(CAM_INTF_META_PRED_COLOR_CORRECT_GAINS,     cam_color_correct_gains_t,   1);
+    INCLUDE(CAM_INTF_META_AEC_ROI,                      cam_area_t,                  1);
+    INCLUDE(CAM_INTF_META_AEC_STATE,                    uint32_t,                    1);
+    INCLUDE(CAM_INTF_PARM_FOCUS_MODE,                   uint32_t,                    1);
+    INCLUDE(CAM_INTF_PARM_MANUAL_FOCUS_POS,             cam_manual_focus_parm_t,     1);
+    INCLUDE(CAM_INTF_META_AF_ROI,                       cam_area_t,                  1);
+    INCLUDE(CAM_INTF_META_AF_STATE,                     uint32_t,                    1);
+    INCLUDE(CAM_INTF_PARM_WHITE_BALANCE,                int32_t,                     1);
+    INCLUDE(CAM_INTF_META_AWB_REGIONS,                  cam_area_t,                  1);
+    INCLUDE(CAM_INTF_META_AWB_STATE,                    uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_BLACK_LEVEL_LOCK,             uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_MODE,                         uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_EDGE_MODE,                    cam_edge_application_t,      1);
+    INCLUDE(CAM_INTF_META_FLASH_POWER,                  uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_FLASH_FIRING_TIME,            int64_t,                     1);
+    INCLUDE(CAM_INTF_META_FLASH_MODE,                   uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_FLASH_STATE,                  int32_t,                     1);
+    INCLUDE(CAM_INTF_META_HOTPIXEL_MODE,                uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_LENS_APERTURE,                float,                       1);
+    INCLUDE(CAM_INTF_META_LENS_FILTERDENSITY,           float,                       1);
+    INCLUDE(CAM_INTF_META_LENS_FOCAL_LENGTH,            float,                       1);
+    INCLUDE(CAM_INTF_META_LENS_FOCUS_DISTANCE,          float,                       1);
+    INCLUDE(CAM_INTF_META_LENS_FOCUS_RANGE,             float,                       2);
+    INCLUDE(CAM_INTF_META_LENS_STATE,                   cam_af_lens_state_t,         1);
+    INCLUDE(CAM_INTF_META_LENS_OPT_STAB_MODE,           uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_VIDEO_STAB_MODE,              uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_LENS_FOCUS_STATE,             uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_NOISE_REDUCTION_MODE,         uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_NOISE_REDUCTION_STRENGTH,     uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_SCALER_CROP_REGION,           cam_crop_region_t,           1);
+    INCLUDE(CAM_INTF_META_SCENE_FLICKER,                uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_SENSOR_EXPOSURE_TIME,         int64_t,                     1);
+    INCLUDE(CAM_INTF_META_SENSOR_FRAME_DURATION,        int64_t,                     1);
+    INCLUDE(CAM_INTF_META_SENSOR_SENSITIVITY,           int32_t,                     1);
+    INCLUDE(CAM_INTF_META_SENSOR_TIMESTAMP,             int64_t,                     1);
+    INCLUDE(CAM_INTF_META_SENSOR_ROLLING_SHUTTER_SKEW,  int64_t,                     1);
+    INCLUDE(CAM_INTF_META_SHADING_MODE,                 uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_STATS_FACEDETECT_MODE,        uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_STATS_HISTOGRAM_MODE,         uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_STATS_SHARPNESS_MAP_MODE,     uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_STATS_SHARPNESS_MAP,          cam_sharpness_map_t,         3);
+    INCLUDE(CAM_INTF_META_TONEMAP_CURVES,               cam_rgb_tonemap_curves,      1);
+    INCLUDE(CAM_INTF_META_LENS_SHADING_MAP,             cam_lens_shading_map_t,      1);
+    INCLUDE(CAM_INTF_META_AEC_INFO,                     cam_3a_params_t,             1);
+    INCLUDE(CAM_INTF_META_SENSOR_INFO,                  cam_sensor_params_t,         1);
+    INCLUDE(CAM_INTF_META_EXIF_DEBUG_AE,                cam_ae_exif_debug_t,         1);
+    INCLUDE(CAM_INTF_META_EXIF_DEBUG_AWB,               cam_awb_exif_debug_t,        1);
+    INCLUDE(CAM_INTF_META_EXIF_DEBUG_AF,                cam_af_exif_debug_t,         1);
+    INCLUDE(CAM_INTF_META_EXIF_DEBUG_ASD,               cam_asd_exif_debug_t,        1);
+    INCLUDE(CAM_INTF_META_EXIF_DEBUG_STATS,             cam_stats_buffer_exif_debug_t, 1);
+    INCLUDE(CAM_INTF_META_ASD_SCENE_CAPTURE_TYPE,       cam_auto_scene_t,            1);
+    INCLUDE(CAM_INTF_PARM_EFFECT,                       uint32_t,                    1);
+    /* Defining as int32_t so that this array is 4 byte aligned */
+    INCLUDE(CAM_INTF_META_PRIVATE_DATA,                 int32_t,
+            MAX_METADATA_PRIVATE_PAYLOAD_SIZE_IN_BYTES / 4);
+
+    /* Following are Params only and not metadata currently */
+    INCLUDE(CAM_INTF_PARM_HAL_VERSION,                  int32_t,                     1);
+    /* Shared between HAL1 and HAL3 */
+    INCLUDE(CAM_INTF_PARM_ANTIBANDING,                  uint32_t,                    1);
+    INCLUDE(CAM_INTF_PARM_EXPOSURE_COMPENSATION,        int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_EV_STEP,                      cam_rational_type_t,         1);
+    INCLUDE(CAM_INTF_PARM_AEC_LOCK,                     uint32_t,                    1);
+    INCLUDE(CAM_INTF_PARM_FPS_RANGE,                    cam_fps_range_t,             1);
+    INCLUDE(CAM_INTF_PARM_AWB_LOCK,                     uint32_t,                    1);
+    INCLUDE(CAM_INTF_PARM_BESTSHOT_MODE,                uint32_t,                    1);
+    INCLUDE(CAM_INTF_PARM_DIS_ENABLE,                   int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_LED_MODE,                     int32_t,                     1);
+    INCLUDE(CAM_INTF_META_LED_MODE_OVERRIDE,            uint32_t,                    1);
+
+    /* HAL1 specific */
+    /* read only */
+    INCLUDE(CAM_INTF_PARM_QUERY_FLASH4SNAP,             int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_EXPOSURE,                     int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_SHARPNESS,                    int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_CONTRAST,                     int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_SATURATION,                   int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_BRIGHTNESS,                   int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_ISO,                          int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_EXPOSURE_TIME,                uint64_t,                    1);
+    INCLUDE(CAM_INTF_PARM_ZOOM,                         int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_ROLLOFF,                      int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_MODE,                         int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_AEC_ALGO_TYPE,                int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_FOCUS_ALGO_TYPE,              int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_AEC_ROI,                      cam_set_aec_roi_t,           1);
+    INCLUDE(CAM_INTF_PARM_AF_ROI,                       cam_roi_info_t,              1);
+    INCLUDE(CAM_INTF_PARM_SCE_FACTOR,                   int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_FD,                           cam_fd_set_parm_t,           1);
+    INCLUDE(CAM_INTF_PARM_MCE,                          int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_HFR,                          int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_REDEYE_REDUCTION,             int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_WAVELET_DENOISE,              cam_denoise_param_t,         1);
+    INCLUDE(CAM_INTF_PARM_TEMPORAL_DENOISE,             cam_denoise_param_t,         1);
+    INCLUDE(CAM_INTF_PARM_HISTOGRAM,                    int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_ASD_ENABLE,                   int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_RECORDING_HINT,               int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_HDR,                          cam_exp_bracketing_t,        1);
+    INCLUDE(CAM_INTF_PARM_FRAMESKIP,                    int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_ZSL_MODE,                     int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_HDR_NEED_1X,                  int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_LOCK_CAF,                     int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_VIDEO_HDR,                    int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_SENSOR_HDR,                   int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_VT,                           int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_GET_CHROMATIX,                tune_chromatix_t,            1);
+    INCLUDE(CAM_INTF_PARM_SET_RELOAD_CHROMATIX,         tune_chromatix_t,            1);
+    INCLUDE(CAM_INTF_PARM_GET_AFTUNE,                   tune_autofocus_t,            1);
+    INCLUDE(CAM_INTF_PARM_SET_RELOAD_AFTUNE,            tune_autofocus_t,            1);
+    INCLUDE(CAM_INTF_PARM_SET_AUTOFOCUSTUNING,          tune_actuator_t,             1);
+    INCLUDE(CAM_INTF_PARM_SET_VFE_COMMAND,              tune_cmd_t,                  1);
+    INCLUDE(CAM_INTF_PARM_SET_PP_COMMAND,               tune_cmd_t,                  1);
+    INCLUDE(CAM_INTF_PARM_MAX_DIMENSION,                cam_dimension_t,             1);
+    INCLUDE(CAM_INTF_PARM_RAW_DIMENSION,                cam_dimension_t,             1);
+    INCLUDE(CAM_INTF_PARM_TINTLESS,                     int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_WB_MANUAL,                    cam_manual_wb_parm_t,        1);
+    INCLUDE(CAM_INTF_PARM_CDS_MODE,                     int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_EZTUNE_CMD,                   cam_eztune_cmd_data_t,       1);
+    INCLUDE(CAM_INTF_PARM_INT_EVT,                      cam_int_evt_params_t,        1);
+    INCLUDE(CAM_INTF_PARM_RDI_MODE,                     int32_t,                     1);
+    INCLUDE(CAM_INTF_PARM_BURST_NUM,                    uint32_t,                    1);
+    INCLUDE(CAM_INTF_PARM_RETRO_BURST_NUM,              uint32_t,                    1);
+    INCLUDE(CAM_INTF_PARM_BURST_LED_ON_PERIOD,          uint32_t,                    1);
+    INCLUDE(CAM_INTF_PARM_LONGSHOT_ENABLE,              int8_t,                      1);
+    INCLUDE(CAM_INTF_PARM_TONE_MAP_MODE,                uint32_t,                    1);
+
+    /* HAL3 specific */
+    INCLUDE(CAM_INTF_META_STREAM_INFO,                  cam_stream_size_info_t,      1);
+    INCLUDE(CAM_INTF_META_AEC_MODE,                     uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_AEC_PRECAPTURE_TRIGGER,       cam_trigger_t,               1);
+    INCLUDE(CAM_INTF_META_AF_TRIGGER,                   cam_trigger_t,               1);
+    INCLUDE(CAM_INTF_META_CAPTURE_INTENT,               uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_DEMOSAIC,                     int32_t,                     1);
+    INCLUDE(CAM_INTF_META_SHARPNESS_STRENGTH,           int32_t,                     1);
+    INCLUDE(CAM_INTF_META_GEOMETRIC_MODE,               uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_GEOMETRIC_STRENGTH,           uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_LENS_SHADING_MAP_MODE,        uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_SHADING_STRENGTH,             uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_TONEMAP_MODE,                 uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_STREAM_ID,                    cam_stream_ID_t,             1);
+    INCLUDE(CAM_INTF_PARM_STATS_DEBUG_MASK,             uint32_t,                    1);
+    INCLUDE(CAM_INTF_PARM_STATS_AF_PAAF,                uint32_t,                    1);
+    INCLUDE(CAM_INTF_PARM_FOCUS_BRACKETING,             cam_af_bracketing_t,         1);
+    INCLUDE(CAM_INTF_PARM_FLASH_BRACKETING,             cam_flash_bracketing_t,      1);
+    INCLUDE(CAM_INTF_META_JPEG_GPS_COORDINATES,         double,                      3);
+    INCLUDE(CAM_INTF_META_JPEG_GPS_PROC_METHODS,        uint8_t,                     GPS_PROCESSING_METHOD_SIZE);
+    INCLUDE(CAM_INTF_META_JPEG_GPS_TIMESTAMP,           int64_t,                     1);
+    INCLUDE(CAM_INTF_META_JPEG_ORIENTATION,             int32_t,                     1);
+    INCLUDE(CAM_INTF_META_JPEG_QUALITY,                 uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_JPEG_THUMB_QUALITY,           uint32_t,                    1);
+    INCLUDE(CAM_INTF_META_JPEG_THUMB_SIZE,              cam_dimension_t,             1);
+    INCLUDE(CAM_INTF_META_TEST_PATTERN_DATA,            cam_test_pattern_data_t,     1);
+    INCLUDE(CAM_INTF_META_PROFILE_TONE_CURVE,           cam_profile_tone_curve,      1);
+    INCLUDE(CAM_INTF_META_OTP_WB_GRGB,                  float,                       1);
+    INCLUDE(CAM_INTF_PARM_CAC,                          cam_aberration_mode_t,       1);
+    INCLUDE(CAM_INTF_META_NEUTRAL_COL_POINT,            cam_neutral_col_point_t,     1);
+    INCLUDE(CAM_INTF_PARM_ROTATION,                     cam_rotation_info_t,         1);
+    INCLUDE(CAM_INTF_META_IMGLIB,                       cam_intf_meta_imglib_t,      1);
+    INCLUDE(CAM_INTF_PARM_CAPTURE_FRAME_CONFIG,         cam_capture_frame_config_t,  1);
+    INCLUDE(CAM_INTF_PARM_FLIP,                         int32_t,                     1);
+    INCLUDE(CAM_INTF_META_USE_AV_TIMER,                 uint8_t,                     1);
+    INCLUDE(CAM_INTF_META_EFFECTIVE_EXPOSURE_FACTOR,    float,                       1);
+    INCLUDE(CAM_INTF_META_LDAF_EXIF,                    uint32_t,                    2);
+    INCLUDE(CAM_INTF_META_BLACK_LEVEL_SOURCE_PATTERN,   cam_black_level_metadata_t,  1);
+    INCLUDE(CAM_INTF_META_BLACK_LEVEL_APPLIED_PATTERN,  cam_black_level_metadata_t,  1);
+    INCLUDE(CAM_INTF_META_DAEMON_RESTART,               uint8_t,                     1);
+} metadata_data_t;
+
+/* Update the clear_metadata_buffer() function when a new is_xxx_valid is added
+ * to or removed from this structure */
+typedef struct {
+    union{
+        /* Hash table of 'is valid' flags */
+        uint8_t         is_valid[CAM_INTF_PARM_MAX];
+
+        /* Hash table of 'is required' flags for the GET PARAM */
+        uint8_t         is_reqd[CAM_INTF_PARM_MAX];
+    };
+    metadata_data_t data;
+    /*Tuning Data */
+    uint8_t is_tuning_params_valid;
+    tuning_params_t tuning_params;
+
+    /* Mobicat Params */
+    uint8_t is_mobicat_aec_params_valid;
+    cam_3a_params_t mobicat_aec_params;
+
+    /* Stats 3A Debug Params */
+    uint8_t is_statsdebug_ae_params_valid;
+    cam_ae_exif_debug_t statsdebug_ae_data;
+
+    uint8_t is_statsdebug_awb_params_valid;
+    cam_awb_exif_debug_t statsdebug_awb_data;
+
+    uint8_t is_statsdebug_af_params_valid;
+    cam_af_exif_debug_t statsdebug_af_data;
+
+    uint8_t is_statsdebug_asd_params_valid;
+    cam_asd_exif_debug_t statsdebug_asd_data;
+
+    uint8_t is_statsdebug_stats_params_valid;
+    cam_stats_buffer_exif_debug_t statsdebug_stats_buffer_data;
+} metadata_buffer_t;
+
+typedef metadata_buffer_t parm_buffer_t;
+
+#ifdef  __cplusplus
+extern "C" {
+#endif
+
+/* Update this inline function when a new is_xxx_valid is added to
+ * or removed from metadata_buffer_t */
+static inline void clear_metadata_buffer(metadata_buffer_t *meta)
+{
+    memset(meta->is_valid, 0, CAM_INTF_PARM_MAX);
+    meta->is_tuning_params_valid = 0;
+    meta->is_mobicat_aec_params_valid = 0;
+    meta->is_statsdebug_ae_params_valid = 0;
+    meta->is_statsdebug_awb_params_valid = 0;
+    meta->is_statsdebug_af_params_valid = 0;
+    meta->is_statsdebug_asd_params_valid = 0;
+    meta->is_statsdebug_stats_params_valid = 0;
+}
+
+#ifdef  __cplusplus
+}
+#endif
+
+#endif /* __QCAMERA_INTF_H__ */
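
The domain socket parameter macros defined above are easiest to read next to a usage sketch. The snippet below is illustrative only: the fill_and_read() helper and the choice of CAM_INTF_PARM_ZOOM are assumptions, and the Android log header is pulled in because ALOGE is referenced inside the batch macros (the exact header path can vary between platform releases).

    #define LOG_TAG "cam_intf_example"
    #include <cutils/log.h>   /* for the ALOGE used inside the macros */
    #include "cam_intf.h"

    /* Store one zoom value into a parameter batch and read it back. */
    static int32_t fill_and_read(metadata_buffer_t *meta)
    {
        int32_t readback = -1;

        clear_metadata_buffer(meta);

        /* Sets is_valid[CAM_INTF_PARM_ZOOM] and stores the value at index 0. */
        int32_t zoom_level = 2;
        ADD_SET_PARAM_ENTRY_TO_BATCH(meta, CAM_INTF_PARM_ZOOM, zoom_level);

        /* Expands to a typed pointer plus a validity check on the entry. */
        IF_META_AVAILABLE(int32_t, zoom, CAM_INTF_PARM_ZOOM, meta) {
            readback = *zoom;
        }

        /* READ_PARAM_ENTRY copies the slot out without checking is_valid. */
        READ_PARAM_ENTRY(meta, CAM_INTF_PARM_ZOOM, readback);

        return readback;
    }
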
diff --git a/camera/QCamera2/stack/common/cam_list.h b/camera/QCamera2/stack/common/cam_list.h
new file mode 100755
index 0000000..e92041c
--- /dev/null
+++ b/camera/QCamera2/stack/common/cam_list.h
@@ -0,0 +1,85 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/* This file is a slave copy from /vendor/qcom/proprietary/mm-camera/common,
+ * Please do not modify it directly here. */
+
+#ifndef __CAMLIST_H
+#define __CAMLIST_H
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define member_of(ptr, type, member) ({ \
+  const typeof(((type *)0)->member) *__mptr = (ptr); \
+  (type *)((char *)__mptr - offsetof(type,member));})
+
+struct cam_list {
+  struct cam_list *next, *prev;
+};
+
+static inline void cam_list_init(struct cam_list *ptr)
+{
+  ptr->next = ptr;
+  ptr->prev = ptr;
+}
+
+static inline void cam_list_add_tail_node(struct cam_list *item,
+  struct cam_list *head)
+{
+  struct cam_list *prev = head->prev;
+
+  head->prev = item;
+  item->next = head;
+  item->prev = prev;
+  prev->next = item;
+}
+
+static inline void cam_list_insert_before_node(struct cam_list *item,
+  struct cam_list *node)
+{
+  item->next = node;
+  item->prev = node->prev;
+  item->prev->next = item;
+  node->prev = item;
+}
+
+static inline void cam_list_del_node(struct cam_list *ptr)
+{
+  struct cam_list *prev = ptr->prev;
+  struct cam_list *next = ptr->next;
+
+  next->prev = ptr->prev;
+  prev->next = ptr->next;
+  ptr->next = ptr;
+  ptr->prev = ptr;
+}
+
+#endif /* __CAMLIST_H */
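
cam_list.h implements an intrusive, circular doubly linked list: the link node is embedded in the payload struct and member_of() recovers the payload pointer from the embedded node. A small sketch follows; the frame_entry_t type and walk_frames() helper are hypothetical.

    #include <stdint.h>
    #include <stdio.h>
    #include "cam_list.h"

    typedef struct {
        uint32_t frame_id;
        struct cam_list list;   /* embedded link node */
    } frame_entry_t;

    static void walk_frames(void)
    {
        struct cam_list head;
        cam_list_init(&head);   /* dummy head points to itself when empty */

        frame_entry_t a = { .frame_id = 1 };
        frame_entry_t b = { .frame_id = 2 };
        cam_list_add_tail_node(&a.list, &head);
        cam_list_add_tail_node(&b.list, &head);

        /* Walk until the cursor wraps back around to the dummy head. */
        for (struct cam_list *pos = head.next; pos != &head; pos = pos->next) {
            frame_entry_t *entry = member_of(pos, frame_entry_t, list);
            printf("frame %u\n", (unsigned)entry->frame_id);
        }

        cam_list_del_node(&a.list);
        cam_list_del_node(&b.list);
    }
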
diff --git a/camera/QCamera2/stack/common/cam_queue.h b/camera/QCamera2/stack/common/cam_queue.h
new file mode 100755
index 0000000..a23c622
--- /dev/null
+++ b/camera/QCamera2/stack/common/cam_queue.h
@@ -0,0 +1,130 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "cam_list.h"
+
+typedef struct {
+    struct cam_list list;
+    void *data;
+} cam_node_t;
+
+typedef struct {
+    cam_node_t head; /* dummy head */
+    uint32_t size;
+    pthread_mutex_t lock;
+} cam_queue_t;
+
+static inline int32_t cam_queue_init(cam_queue_t *queue)
+{
+    pthread_mutex_init(&queue->lock, NULL);
+    cam_list_init(&queue->head.list);
+    queue->size = 0;
+    return 0;
+}
+
+static inline int32_t cam_queue_enq(cam_queue_t *queue, void *data)
+{
+    cam_node_t *node =
+        (cam_node_t *)malloc(sizeof(cam_node_t));
+    if (NULL == node) {
+        return -1;
+    }
+
+    memset(node, 0, sizeof(cam_node_t));
+    node->data = data;
+
+    pthread_mutex_lock(&queue->lock);
+    cam_list_add_tail_node(&node->list, &queue->head.list);
+    queue->size++;
+    pthread_mutex_unlock(&queue->lock);
+
+    return 0;
+}
+
+static inline void *cam_queue_deq(cam_queue_t *queue)
+{
+    cam_node_t *node = NULL;
+    void *data = NULL;
+    struct cam_list *head = NULL;
+    struct cam_list *pos = NULL;
+
+    pthread_mutex_lock(&queue->lock);
+    head = &queue->head.list;
+    pos = head->next;
+    if (pos != head) {
+        node = member_of(pos, cam_node_t, list);
+        cam_list_del_node(&node->list);
+        queue->size--;
+    }
+    pthread_mutex_unlock(&queue->lock);
+
+    if (NULL != node) {
+        data = node->data;
+        free(node);
+    }
+
+    return data;
+}
+
+static inline int32_t cam_queue_flush(cam_queue_t *queue)
+{
+    cam_node_t *node = NULL;
+    struct cam_list *head = NULL;
+    struct cam_list *pos = NULL;
+
+    pthread_mutex_lock(&queue->lock);
+    head = &queue->head.list;
+    pos = head->next;
+
+    while(pos != head) {
+        node = member_of(pos, cam_node_t, list);
+        pos = pos->next;
+        cam_list_del_node(&node->list);
+        queue->size--;
+
+        /* TODO later to consider ptr inside data */
+        /* for now we only assume there is no ptr inside data
+         * so we free data directly */
+        if (NULL != node->data) {
+            free(node->data);
+        }
+        free(node);
+
+    }
+    queue->size = 0;
+    pthread_mutex_unlock(&queue->lock);
+    return 0;
+}
+
+static inline int32_t cam_queue_deinit(cam_queue_t *queue)
+{
+    cam_queue_flush(queue);
+    pthread_mutex_destroy(&queue->lock);
+    return 0;
+}
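
cam_queue.h layers a mutex-protected FIFO on top of cam_list. A usage sketch follows; note that the header, as added here, expects the including file to have already pulled in <pthread.h> and <stdint.h>, and that cam_queue_flush() frees whatever payloads are still enqueued, so payloads should be heap allocations the queue may own. The queue_demo() helper is hypothetical.

    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include "cam_queue.h"

    static void queue_demo(void)
    {
        cam_queue_t q;
        cam_queue_init(&q);

        int *payload = (int *)malloc(sizeof(int));
        if (payload != NULL) {
            *payload = 42;
            cam_queue_enq(&q, payload);      /* queue stores a void* payload */
        }

        int *out = (int *)cam_queue_deq(&q); /* returns NULL when empty */
        if (out != NULL) {
            printf("dequeued %d\n", *out);
            free(out);                       /* dequeued data is caller-owned */
        }

        cam_queue_deinit(&q);                /* flushes and frees leftovers */
    }
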
diff --git a/camera/QCamera2/stack/common/cam_semaphore.h b/camera/QCamera2/stack/common/cam_semaphore.h
new file mode 100644
index 0000000..a52f907
--- /dev/null
+++ b/camera/QCamera2/stack/common/cam_semaphore.h
@@ -0,0 +1,85 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCAMERA_SEMAPHORE_H__
+#define __QCAMERA_SEMAPHORE_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Implement a semaphore with a mutex and a condition variable,
+ * since POSIX semaphores on Android are not commonly used or
+ * well tested.
+ */
+
+typedef struct {
+    int val;
+    pthread_mutex_t mutex;
+    pthread_cond_t cond;
+} cam_semaphore_t;
+
+static inline void cam_sem_init(cam_semaphore_t *s, int n)
+{
+    pthread_mutex_init(&(s->mutex), NULL);
+    pthread_cond_init(&(s->cond), NULL);
+    s->val = n;
+}
+
+static inline void cam_sem_post(cam_semaphore_t *s)
+{
+    pthread_mutex_lock(&(s->mutex));
+    s->val++;
+    pthread_cond_signal(&(s->cond));
+    pthread_mutex_unlock(&(s->mutex));
+}
+
+static inline int cam_sem_wait(cam_semaphore_t *s)
+{
+    int rc = 0;
+    pthread_mutex_lock(&(s->mutex));
+    while (s->val == 0)
+        rc = pthread_cond_wait(&(s->cond), &(s->mutex));
+    s->val--;
+    pthread_mutex_unlock(&(s->mutex));
+    return rc;
+}
+
+static inline void cam_sem_destroy(cam_semaphore_t *s)
+{
+    pthread_mutex_destroy(&(s->mutex));
+    pthread_cond_destroy(&(s->cond));
+    s->val = 0;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __QCAMERA_SEMAPHORE_H__ */
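
cam_semaphore.h builds a counting semaphore from a mutex and a condition variable. A minimal signalling sketch follows; the worker() and sem_demo() helpers are hypothetical, and, as with cam_queue.h, the header expects <pthread.h> from the including file.

    #include <pthread.h>
    #include <stdio.h>
    #include "cam_semaphore.h"

    static cam_semaphore_t g_sem;

    static void *worker(void *arg)
    {
        (void)arg;
        cam_sem_wait(&g_sem);          /* blocks until cam_sem_post() runs */
        printf("worker released\n");
        return NULL;
    }

    static void sem_demo(void)
    {
        pthread_t tid;

        cam_sem_init(&g_sem, 0);       /* start with no available permits */
        pthread_create(&tid, NULL, worker, NULL);

        cam_sem_post(&g_sem);          /* wake exactly one waiter */
        pthread_join(tid, NULL);
        cam_sem_destroy(&g_sem);
    }
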
diff --git a/camera/QCamera2/stack/common/cam_types.h b/camera/QCamera2/stack/common/cam_types.h
new file mode 100644
index 0000000..2bffc91
--- /dev/null
+++ b/camera/QCamera2/stack/common/cam_types.h
@@ -0,0 +1,2295 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCAMERA_TYPES_H__
+#define __QCAMERA_TYPES_H__
+
+#include <stdint.h>
+#include <pthread.h>
+#include <inttypes.h>
+#include <media/msmb_camera.h>
+
+#define CAM_MAX_NUM_BUFS_PER_STREAM 64
+#define MAX_METADATA_PRIVATE_PAYLOAD_SIZE_IN_BYTES 8096
+#define AWB_DEBUG_DATA_SIZE               (69189)
+#define AEC_DEBUG_DATA_SIZE               (3921)
+#define AF_DEBUG_DATA_SIZE                (8487)
+#define ASD_DEBUG_DATA_SIZE               (100)
+#define STATS_BUFFER_DEBUG_DATA_SIZE      (74773)
+
+#define CEILING64(X) (((X) + 0x0003F) & 0xFFFFFFC0)
+#define CEILING32(X) (((X) + 0x0001F) & 0xFFFFFFE0)
+#define CEILING16(X) (((X) + 0x000F) & 0xFFF0)
+#define CEILING4(X)  (((X) + 0x0003) & 0xFFFC)
+#define CEILING2(X)  (((X) + 0x0001) & 0xFFFE)
+
+#define MAX_ZOOMS_CNT 91
+#define MAX_SIZES_CNT 30
+#define MAX_EXP_BRACKETING_LENGTH 32
+#define MAX_ROI 5
+#define MAX_STREAM_NUM_IN_BUNDLE 8
+#define MAX_NUM_STREAMS          8
+#define CHROMATIX_SIZE 60000
+#define COMMONCHROMATIX_SIZE 45000
+#define CPPCHROMATIX_SIZE 36000
+#define SWPOSTPROCCHROMATIX_SIZE 36000
+#define AFTUNE_SIZE  10700
+#define MAX_SCALE_SIZES_CNT 8
+#define MAX_SAMP_DECISION_CNT     64
+#define SENSOR_PHYSICAL_SIZE_CNT  2
+#define EXPOSURE_TIME_RANGE_CNT   2
+#define BLACK_LEVEL_PATTERN_CNT   4
+#define FORWARD_MATRIX_COLS       3
+#define FORWARD_MATRIX_ROWS       3
+#define COLOR_TRANSFORM_COLS      3
+#define COLOR_TRANSFORM_ROWS      3
+#define CAL_TRANSFORM_COLS        3
+#define CAL_TRANSFORM_ROWS        3
+
+#define MAX_ISP_DATA_SIZE (20*1024)
+#define MAX_PP_DATA_SIZE  16384
+#define MAX_AE_STATS_DATA_SIZE  1000
+#define MAX_AWB_STATS_DATA_SIZE 1000
+#define MAX_AF_STATS_DATA_SIZE  1000
+#define MAX_ASD_STATS_DATA_SIZE 1000
+
+#define MAX_CAPTURE_BATCH_NUM 32
+
+#define TUNING_DATA_VERSION        3
+#define TUNING_SENSOR_DATA_MAX     0x10000 /*(need value from sensor team)*/
+#define TUNING_VFE_DATA_MAX        0x10000 /*(need value from vfe team)*/
+#define TUNING_CPP_DATA_MAX        0x10000 /*(need value from pproc team)*/
+#define TUNING_CAC_DATA_MAX        0x10000 /*(need value from imglib team)*/
+#define TUNING_DATA_MAX            (TUNING_SENSOR_DATA_MAX + \
+                                   TUNING_VFE_DATA_MAX + TUNING_CPP_DATA_MAX + \
+                                   TUNING_CAC_DATA_MAX)
+
+#define TUNING_SENSOR_DATA_OFFSET  0
+#define TUNING_VFE_DATA_OFFSET     TUNING_SENSOR_DATA_MAX
+#define TUNING_CPP_DATA_OFFSET     (TUNING_SENSOR_DATA_MAX + TUNING_VFE_DATA_MAX)
+#define TUNING_CAC_DATA_OFFSET     (TUNING_SENSOR_DATA_MAX + \
+                                   TUNING_VFE_DATA_MAX + TUNING_CPP_DATA_MAX)
+#define MAX_STATS_DATA_SIZE 4000
+
+#define MAX_AF_BRACKETING_VALUES 5
+#define MAX_TEST_PATTERN_CNT     8
+
+#define GPS_PROCESSING_METHOD_SIZE 33
+#define EXIF_IMAGE_DESCRIPTION_SIZE 100
+
+#define MAX_INFLIGHT_REQUESTS  6
+#define MAX_INFLIGHT_BLOB      2
+
+#ifdef OPTIMIZE_BUF_COUNT
+#undef MAX_INFLIGHT_BLOB
+#define MAX_INFLIGHT_BLOB      1
+#endif
+
+#define MIN_INFLIGHT_REQUESTS  3
+#define MAX_INFLIGHT_REPROCESS_REQUESTS 1
+#define MAX_INFLIGHT_HFR_REQUESTS (48)
+#define MIN_INFLIGHT_HFR_REQUESTS (40)
+
+#define QCAMERA_DUMP_FRM_LOCATION "/data/misc/camera/"
+#define QCAMERA_MAX_FILEPATH_LENGTH 64
+
+#define CAM_INTF_AEC_DATA_MAX   (10)
+#define LIKELY(x)       __builtin_expect((x), true)
+#define UNLIKELY(x)     __builtin_expect((x), false)
+
+#define MAX_REPROCESS_STALL 2
+
+#define QCAMERA_MAX_FILEPATH_LENGTH 64
+
+#define MAX_EEPROM_VERSION_INFO_LEN 32
+
+#define MAX_OPTICAL_BLACK_REGIONS 5
+
+/*reprocess pipeline stages are pproc and jpeg */
+#define MAX_REPROCESS_PIPELINE_STAGES 2
+
+typedef enum {
+    CAM_HAL_V1 = 1,
+    CAM_HAL_V3 = 3
+} cam_hal_version_t;
+
+typedef enum {
+    CAM_STATUS_SUCCESS,       /* Operation succeeded */
+    CAM_STATUS_FAILED,        /* Operation failed */
+    CAM_STATUS_INVALID_PARM,  /* Invalid parameter provided */
+    CAM_STATUS_NOT_SUPPORTED, /* Parameter/operation not supported */
+    CAM_STATUS_ACCEPTED,      /* Parameter accepted */
+    CAM_STATUS_MAX,
+} cam_status_t;
+
+typedef enum {
+    CAM_POSITION_BACK,
+    CAM_POSITION_FRONT
+} cam_position_t;
+
+typedef enum {
+    CAM_FLICKER_NONE,
+    CAM_FLICKER_50_HZ,
+    CAM_FLICKER_60_HZ
+} cam_flicker_t;
+
+typedef enum {
+    CAM_FORMAT_JPEG = 0,
+    CAM_FORMAT_YUV_420_NV12 = 1,
+    CAM_FORMAT_YUV_420_NV21,
+    CAM_FORMAT_YUV_420_NV21_ADRENO,
+    CAM_FORMAT_YUV_420_YV12,
+    CAM_FORMAT_YUV_422_NV16,
+    CAM_FORMAT_YUV_422_NV61,
+    CAM_FORMAT_YUV_420_NV12_VENUS,
+
+    /* Note: For all raw formats, each scanline needs to be 16-byte aligned */
+
+    /* Packed YUV/YVU raw format, 16 bpp: 8 bits Y and 8 bits UV.
+     * U and V are interleaved with Y: YUYV or YVYV */
+    CAM_FORMAT_YUV_RAW_8BIT_YUYV,
+    CAM_FORMAT_YUV_RAW_8BIT_YVYU,
+    CAM_FORMAT_YUV_RAW_8BIT_UYVY,
+    CAM_FORMAT_YUV_RAW_8BIT_VYUY,
+
+    /* QCOM RAW formats where data is packed into 64bit word.
+     * 8BPP: 1 64-bit word contains 8 pixels p0 - p7, where p0 is
+     *       stored at LSB.
+     * 10BPP: 1 64-bit word contains 6 pixels p0 - p5, where most
+     *       significant 4 bits are set to 0. P0 is stored at LSB.
+     * 12BPP: 1 64-bit word contains 5 pixels p0 - p4, where most
+     *       significant 4 bits are set to 0. P0 is stored at LSB. */
+    CAM_FORMAT_BAYER_QCOM_RAW_8BPP_GBRG,
+    CAM_FORMAT_BAYER_QCOM_RAW_8BPP_GRBG,
+    CAM_FORMAT_BAYER_QCOM_RAW_8BPP_RGGB,
+    CAM_FORMAT_BAYER_QCOM_RAW_8BPP_BGGR,
+    CAM_FORMAT_BAYER_QCOM_RAW_10BPP_GBRG,
+    CAM_FORMAT_BAYER_QCOM_RAW_10BPP_GRBG,
+    CAM_FORMAT_BAYER_QCOM_RAW_10BPP_RGGB,
+    CAM_FORMAT_BAYER_QCOM_RAW_10BPP_BGGR,
+    CAM_FORMAT_BAYER_QCOM_RAW_12BPP_GBRG,
+    CAM_FORMAT_BAYER_QCOM_RAW_12BPP_GRBG,
+    CAM_FORMAT_BAYER_QCOM_RAW_12BPP_RGGB,
+    CAM_FORMAT_BAYER_QCOM_RAW_12BPP_BGGR,
+    /* MIPI RAW formats based on the MIPI CSI-2 specification.
+     * 8BPP: Each pixel occupies one byte, starting at LSB.
+     *       Output width of image has no restrictions.
+     * 10BPP: Four pixels are held in every 5 bytes. The output
+     *       width of image must be a multiple of 4 pixels.
+     * 12BPP: Two pixels are held in every 3 bytes. The output
+     *       width of image must be a multiple of 2 pixels. */
+    CAM_FORMAT_BAYER_MIPI_RAW_8BPP_GBRG,
+    CAM_FORMAT_BAYER_MIPI_RAW_8BPP_GRBG,
+    CAM_FORMAT_BAYER_MIPI_RAW_8BPP_RGGB,
+    CAM_FORMAT_BAYER_MIPI_RAW_8BPP_BGGR,
+    CAM_FORMAT_BAYER_MIPI_RAW_10BPP_GBRG,
+    CAM_FORMAT_BAYER_MIPI_RAW_10BPP_GRBG,
+    CAM_FORMAT_BAYER_MIPI_RAW_10BPP_RGGB,
+    CAM_FORMAT_BAYER_MIPI_RAW_10BPP_BGGR,
+    CAM_FORMAT_BAYER_MIPI_RAW_12BPP_GBRG,
+    CAM_FORMAT_BAYER_MIPI_RAW_12BPP_GRBG,
+    CAM_FORMAT_BAYER_MIPI_RAW_12BPP_RGGB,
+    CAM_FORMAT_BAYER_MIPI_RAW_12BPP_BGGR,
+    /* Ideal raw formats where image data has gone through black
+     * correction, lens rolloff, demux/channel gain, bad pixel
+     * correction, and ABF.
+     * Ideal raw formats could output any of QCOM_RAW and MIPI_RAW
+     * formats, plus plain8 8bpp, plain16 8bpp, plain16 10bpp, and
+     * plain16 12bpp */
+    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_8BPP_GBRG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_8BPP_GRBG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_8BPP_RGGB,
+    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_8BPP_BGGR,
+    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_10BPP_GBRG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_10BPP_GRBG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_10BPP_RGGB,
+    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_10BPP_BGGR,
+    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_12BPP_GBRG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_12BPP_GRBG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_12BPP_RGGB,
+    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_12BPP_BGGR,
+    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_8BPP_GBRG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_8BPP_GRBG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_8BPP_RGGB,
+    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_8BPP_BGGR,
+    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_10BPP_GBRG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_10BPP_GRBG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_10BPP_RGGB,
+    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_10BPP_BGGR,
+    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_12BPP_GBRG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_12BPP_GRBG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_12BPP_RGGB,
+    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_12BPP_BGGR,
+    CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN8_8BPP_GBRG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN8_8BPP_GRBG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN8_8BPP_RGGB,
+    CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN8_8BPP_BGGR,
+    CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_8BPP_GBRG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_8BPP_GRBG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_8BPP_RGGB,
+    CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_8BPP_BGGR,
+    CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_10BPP_GBRG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_10BPP_GRBG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_10BPP_RGGB,
+    CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_10BPP_BGGR,
+    CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_12BPP_GBRG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_12BPP_GRBG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_12BPP_RGGB,
+    CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_12BPP_BGGR,
+
+    /* generic 8-bit raw */
+    CAM_FORMAT_JPEG_RAW_8BIT,
+    CAM_FORMAT_META_RAW_8BIT,
+
+    /* QCOM RAW formats where data is packed into 64bit word.
+     * 14BPP: 1 64-bit word contains 4 pixels p0 - p3, where most
+     *       significant 4 bits are set to 0. P0 is stored at LSB.
+     */
+    CAM_FORMAT_BAYER_QCOM_RAW_14BPP_GBRG,
+    CAM_FORMAT_BAYER_QCOM_RAW_14BPP_GRBG,
+    CAM_FORMAT_BAYER_QCOM_RAW_14BPP_RGGB,
+    CAM_FORMAT_BAYER_QCOM_RAW_14BPP_BGGR,
+    /* MIPI RAW formats based on the MIPI CSI-2 specification.
+     * 14BPP: 1st byte: P0 [13:6]
+     *          2nd byte: P1 [13:6]
+     *          3rd byte: P2 [13:6]
+     *          4th byte: P3 [13:6]
+     *          5th byte: P0 [5:0]
+     *          7th byte: P1 [5:0]
+     *          8th byte: P2 [5:0]
+     *          9th byte: P3 [5:0]
+     */
+    CAM_FORMAT_BAYER_MIPI_RAW_14BPP_GBRG,
+    CAM_FORMAT_BAYER_MIPI_RAW_14BPP_GRBG,
+    CAM_FORMAT_BAYER_MIPI_RAW_14BPP_RGGB,
+    CAM_FORMAT_BAYER_MIPI_RAW_14BPP_BGGR,
+    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_14BPP_GBRG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_14BPP_GRBG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_14BPP_RGGB,
+    CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_14BPP_BGGR,
+    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_14BPP_GBRG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_14BPP_GRBG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_14BPP_RGGB,
+    CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_14BPP_BGGR,
+    /* 14BPP: 1st byte: P0 [8:0]
+     *        2nd byte: P0 [13:9]
+     *        3rd byte: P1 [8:0]
+     *        4th byte: P1 [13:9]
+     */
+    CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_14BPP_GBRG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_14BPP_GRBG,
+    CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_14BPP_RGGB,
+    CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_14BPP_BGGR,
+
+    CAM_FORMAT_YUV_444_NV24,
+    CAM_FORMAT_YUV_444_NV42,
+
+    /* Y plane only, used for FD */
+    CAM_FORMAT_Y_ONLY,
+
+    CAM_FORMAT_MAX
+} cam_format_t;
+
+typedef enum {
+    /* applies to HAL 1 */
+    CAM_STREAM_TYPE_DEFAULT,       /* default stream type */
+    CAM_STREAM_TYPE_PREVIEW,       /* preview */
+    CAM_STREAM_TYPE_POSTVIEW,      /* postview */
+    CAM_STREAM_TYPE_SNAPSHOT,      /* snapshot */
+    CAM_STREAM_TYPE_VIDEO,         /* video */
+
+    /* applies to HAL 3 */
+    CAM_STREAM_TYPE_CALLBACK,      /* app requested callback */
+    CAM_STREAM_TYPE_IMPL_DEFINED, /* opaque format: could be display, video enc, ZSL YUV */
+
+    /* applies to both HAL 1 and HAL 3 */
+    CAM_STREAM_TYPE_METADATA,      /* meta data */
+    CAM_STREAM_TYPE_RAW,           /* raw dump from camif */
+    CAM_STREAM_TYPE_OFFLINE_PROC,  /* offline process */
+    CAM_STREAM_TYPE_PARM,         /* mct internal stream */
+    CAM_STREAM_TYPE_ANALYSIS,     /* analysis stream */
+    CAM_STREAM_TYPE_MAX,
+} cam_stream_type_t;
+
+typedef enum {
+    CAM_PAD_NONE = 1,
+    CAM_PAD_TO_2 = 2,
+    CAM_PAD_TO_4 = 4,
+    CAM_PAD_TO_WORD = CAM_PAD_TO_4,
+    CAM_PAD_TO_8 = 8,
+    CAM_PAD_TO_16 = 16,
+    CAM_PAD_TO_32 = 32,
+    CAM_PAD_TO_64 = 64,
+    CAM_PAD_TO_1K = 1024,
+    CAM_PAD_TO_2K = 2048,
+    CAM_PAD_TO_4K = 4096,
+    CAM_PAD_TO_8K = 8192
+} cam_pad_format_t;
+
+typedef enum {
+    /* the following are per camera */
+    CAM_MAPPING_BUF_TYPE_CAPABILITY,  /* mapping camera capability buffer */
+    CAM_MAPPING_BUF_TYPE_PARM_BUF,    /* mapping parameters buffer */
+
+    /* the following are per stream */
+    CAM_MAPPING_BUF_TYPE_STREAM_BUF,        /* mapping stream buffers */
+    CAM_MAPPING_BUF_TYPE_STREAM_INFO,       /* mapping stream information buffer */
+    CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF, /* mapping offline process input buffer */
+    CAM_MAPPING_BUF_TYPE_OFFLINE_META_BUF,  /* mapping offline meta buffer */
+    CAM_MAPPING_BUF_TYPE_MISC_BUF,          /* mapping offline miscellaneous buffer */
+    CAM_MAPPING_BUF_TYPE_STREAM_USER_BUF,   /* mapping user ptr stream buffers */
+    CAM_MAPPING_BUF_TYPE_MAX
+} cam_mapping_buf_type;
+
+typedef enum {
+    CAM_STREAM_BUF_TYPE_MPLANE,  /* Multiplanar Buffer type */
+    CAM_STREAM_BUF_TYPE_USERPTR, /* User specific structure pointer*/
+    CAM_STREAM_BUF_TYPE_MAX
+} cam_stream_buf_type;
+
+/* values that persist.camera.global.debug can be set to */
+/* all camera modules need to map their internal debug levels to this range */
+typedef enum {
+    CAM_GLBL_DBG_ERR    = 0,
+    CAM_GLBL_DBG_HIGH   = 1,
+    CAM_GLBL_DBG_LOW    = 2,
+} cam_global_debug_level_t;
+
+typedef struct {
+    cam_mapping_buf_type type;
+    uint32_t stream_id;   /* stream id: valid if STREAM_BUF */
+    uint32_t frame_idx;   /* frame index: valid if type is STREAM_BUF */
+    int32_t plane_idx;    /* plane index. valid if type is STREAM_BUF.
+                           * -1 means all planes share the same fd;
+                           * otherwise, each plane has its own fd */
+    uint32_t cookie;      /* could be job_id(uint32_t) to identify mapping job */
+    int32_t fd;           /* origin fd */
+    size_t size;          /* size of the buffer */
+} cam_buf_map_type;
+
+typedef struct {
+    cam_mapping_buf_type type;
+    uint32_t stream_id;   /* stream id: valid if STREAM_BUF */
+    uint32_t frame_idx;   /* frame index: valid if STREAM_BUF or HIST_BUF */
+    int32_t plane_idx;    /* plane index. valid if type is STREAM_BUF.
+                           * -1 means all planes share the same fd;
+                           * otherwise, each plane has its own fd */
+    uint32_t cookie;      /* could be job_id(uint32_t) to identify unmapping job */
+} cam_buf_unmap_type;
+
+typedef enum {
+    CAM_MAPPING_TYPE_FD_MAPPING,
+    CAM_MAPPING_TYPE_FD_UNMAPPING,
+    CAM_MAPPING_TYPE_MAX
+} cam_mapping_type;
+
+typedef struct {
+    cam_mapping_type msg_type;
+    union {
+        cam_buf_map_type buf_map;
+        cam_buf_unmap_type buf_unmap;
+    } payload;
+} cam_sock_packet_t;
+
+typedef enum {
+    CAM_MODE_2D = (1<<0),
+    CAM_MODE_3D = (1<<1)
+} cam_mode_t;
+
+typedef struct {
+    uint32_t len;
+    uint32_t y_offset;
+    uint32_t cbcr_offset;
+} cam_sp_len_offset_t;
+
+typedef struct{
+    uint32_t len;
+    uint32_t offset;
+    int32_t offset_x;
+    int32_t offset_y;
+    int32_t stride;
+    int32_t stride_in_bytes;
+    int32_t scanline;
+    int32_t width;    /* width without padding */
+    int32_t height;   /* height without padding */
+} cam_mp_len_offset_t;
+
+typedef struct {
+    uint32_t width_padding;
+    uint32_t height_padding;
+    uint32_t plane_padding;
+    uint32_t min_stride;
+    uint32_t min_scanline;
+} cam_padding_info_t;
+
+typedef struct {
+    uint32_t num_planes;    /*Number of planes in planar buffer*/
+    union {
+        cam_sp_len_offset_t sp;
+        cam_mp_len_offset_t mp[VIDEO_MAX_PLANES];
+    };
+    uint32_t frame_len;
+} cam_frame_len_offset_t;
+
+typedef struct {
+    uint8_t frame_buf_cnt;  /*Total plane frames present in 1 batch*/
+    uint32_t size;          /*Size of 1 batch buffer. Kernel structure size*/
+    long frameInterval;     /*frame interval between each frame*/
+} cam_stream_user_buf_info_t;
+
+typedef struct {
+    int32_t width;
+    int32_t height;
+} cam_dimension_t;
+
+typedef struct {
+    cam_frame_len_offset_t plane_info;
+} cam_stream_buf_plane_info_t;
+
+typedef struct {
+    float min_fps;
+    float max_fps;
+    float video_min_fps;
+    float video_max_fps;
+} cam_fps_range_t;
+
+typedef struct {
+    int32_t min_sensitivity;
+    int32_t max_sensitivity;
+} cam_sensitivity_range_t;
+
+typedef enum {
+    CAM_HFR_MODE_OFF,
+    CAM_HFR_MODE_60FPS,
+    CAM_HFR_MODE_90FPS,
+    CAM_HFR_MODE_120FPS,
+    CAM_HFR_MODE_150FPS,
+    CAM_HFR_MODE_180FPS,
+    CAM_HFR_MODE_210FPS,
+    CAM_HFR_MODE_240FPS,
+    CAM_HFR_MODE_480FPS,
+    CAM_HFR_MODE_MAX
+} cam_hfr_mode_t;
+
+typedef struct {
+    cam_hfr_mode_t mode;
+    cam_dimension_t dim;
+    uint8_t frame_skip;
+    uint8_t livesnapshot_sizes_tbl_cnt;                     /* livesnapshot sizes table size */
+    cam_dimension_t livesnapshot_sizes_tbl[MAX_SIZES_CNT];  /* livesnapshot sizes table */
+} cam_hfr_info_t;
+
+typedef enum {
+    CAM_WB_MODE_AUTO,
+    CAM_WB_MODE_CUSTOM,
+    CAM_WB_MODE_INCANDESCENT,
+    CAM_WB_MODE_FLUORESCENT,
+    CAM_WB_MODE_WARM_FLUORESCENT,
+    CAM_WB_MODE_DAYLIGHT,
+    CAM_WB_MODE_CLOUDY_DAYLIGHT,
+    CAM_WB_MODE_TWILIGHT,
+    CAM_WB_MODE_SHADE,
+    CAM_WB_MODE_MANUAL,
+    CAM_WB_MODE_OFF,
+    CAM_WB_MODE_MAX
+} cam_wb_mode_type;
+
+typedef enum {
+    CAM_ANTIBANDING_MODE_OFF,
+    CAM_ANTIBANDING_MODE_60HZ,
+    CAM_ANTIBANDING_MODE_50HZ,
+    CAM_ANTIBANDING_MODE_AUTO,
+    CAM_ANTIBANDING_MODE_AUTO_50HZ,
+    CAM_ANTIBANDING_MODE_AUTO_60HZ,
+    CAM_ANTIBANDING_MODE_MAX,
+} cam_antibanding_mode_type;
+
+/* Enum Type for different ISO Mode supported */
+typedef enum {
+    CAM_ISO_MODE_AUTO,
+    CAM_ISO_MODE_DEBLUR,
+    CAM_ISO_MODE_100,
+    CAM_ISO_MODE_200,
+    CAM_ISO_MODE_400,
+    CAM_ISO_MODE_800,
+    CAM_ISO_MODE_1600,
+    CAM_ISO_MODE_3200,
+    CAM_ISO_MODE_MAX
+} cam_iso_mode_type;
+
+typedef enum {
+    CAM_AEC_MODE_FRAME_AVERAGE,
+    CAM_AEC_MODE_CENTER_WEIGHTED,
+    CAM_AEC_MODE_SPOT_METERING,
+    CAM_AEC_MODE_SMART_METERING,
+    CAM_AEC_MODE_USER_METERING,
+    CAM_AEC_MODE_SPOT_METERING_ADV,
+    CAM_AEC_MODE_CENTER_WEIGHTED_ADV,
+    CAM_AEC_MODE_MAX
+} cam_auto_exposure_mode_type;
+
+typedef enum {
+    CAM_AE_MODE_OFF,
+    CAM_AE_MODE_ON,
+    CAM_AE_MODE_MAX
+} cam_ae_mode_type;
+
+typedef enum {
+    CAM_FOCUS_ALGO_AUTO,
+    CAM_FOCUS_ALGO_SPOT,
+    CAM_FOCUS_ALGO_CENTER_WEIGHTED,
+    CAM_FOCUS_ALGO_AVERAGE,
+    CAM_FOCUS_ALGO_MAX
+} cam_focus_algorithm_type;
+
+/* Auto focus mode */
+typedef enum {
+    CAM_FOCUS_MODE_OFF,
+    CAM_FOCUS_MODE_AUTO,
+    CAM_FOCUS_MODE_INFINITY,
+    CAM_FOCUS_MODE_MACRO,
+    CAM_FOCUS_MODE_FIXED,
+    CAM_FOCUS_MODE_EDOF,
+    CAM_FOCUS_MODE_CONTINOUS_VIDEO,
+    CAM_FOCUS_MODE_CONTINOUS_PICTURE,
+    CAM_FOCUS_MODE_MANUAL,
+    CAM_FOCUS_MODE_MAX
+} cam_focus_mode_type;
+
+typedef enum {
+    CAM_MANUAL_FOCUS_MODE_INDEX,
+    CAM_MANUAL_FOCUS_MODE_DAC_CODE,
+    CAM_MANUAL_FOCUS_MODE_RATIO,
+    CAM_MANUAL_FOCUS_MODE_DIOPTER,
+    CAM_MANUAL_FOCUS_MODE_MAX
+} cam_manual_focus_mode_type;
+
+typedef struct {
+    cam_manual_focus_mode_type flag;
+    union{
+        int32_t af_manual_lens_position_index;
+        int32_t af_manual_lens_position_dac;
+        int32_t af_manual_lens_position_ratio;
+        float af_manual_diopter;
+    };
+} cam_manual_focus_parm_t;
+
+typedef enum {
+    CAM_MANUAL_WB_MODE_CCT,
+    CAM_MANUAL_WB_MODE_GAIN,
+    CAM_MANUAL_WB_MODE_MAX
+} cam_manual_wb_mode_type;
+
+typedef struct {
+    float r_gain;
+    float g_gain;
+    float b_gain;
+} cam_awb_gain_t;
+
+typedef struct {
+    cam_manual_wb_mode_type type;
+    union{
+        int32_t cct;
+        cam_awb_gain_t gains;
+    };
+} cam_manual_wb_parm_t;
+
+typedef enum {
+    CAM_SCENE_MODE_OFF,
+    CAM_SCENE_MODE_AUTO,
+    CAM_SCENE_MODE_LANDSCAPE,
+    CAM_SCENE_MODE_SNOW,
+    CAM_SCENE_MODE_BEACH,
+    CAM_SCENE_MODE_SUNSET,
+    CAM_SCENE_MODE_NIGHT,
+    CAM_SCENE_MODE_PORTRAIT,
+    CAM_SCENE_MODE_BACKLIGHT,
+    CAM_SCENE_MODE_SPORTS,
+    CAM_SCENE_MODE_ANTISHAKE,
+    CAM_SCENE_MODE_FLOWERS,
+    CAM_SCENE_MODE_CANDLELIGHT,
+    CAM_SCENE_MODE_FIREWORKS,
+    CAM_SCENE_MODE_PARTY,
+    CAM_SCENE_MODE_NIGHT_PORTRAIT,
+    CAM_SCENE_MODE_THEATRE,
+    CAM_SCENE_MODE_ACTION,
+    CAM_SCENE_MODE_AR,
+    CAM_SCENE_MODE_FACE_PRIORITY,
+    CAM_SCENE_MODE_BARCODE,
+    CAM_SCENE_MODE_HDR,
+    CAM_SCENE_MODE_MAX
+} cam_scene_mode_type;
+
+typedef enum {
+    CAM_EFFECT_MODE_OFF,
+    CAM_EFFECT_MODE_MONO,
+    CAM_EFFECT_MODE_NEGATIVE,
+    CAM_EFFECT_MODE_SOLARIZE,
+    CAM_EFFECT_MODE_SEPIA,
+    CAM_EFFECT_MODE_POSTERIZE,
+    CAM_EFFECT_MODE_WHITEBOARD,
+    CAM_EFFECT_MODE_BLACKBOARD,
+    CAM_EFFECT_MODE_AQUA,
+    CAM_EFFECT_MODE_EMBOSS,
+    CAM_EFFECT_MODE_SKETCH,
+    CAM_EFFECT_MODE_NEON,
+    CAM_EFFECT_MODE_MAX
+} cam_effect_mode_type;
+
+typedef enum {
+    CAM_FLASH_MODE_OFF,
+    CAM_FLASH_MODE_AUTO,
+    CAM_FLASH_MODE_ON,
+    CAM_FLASH_MODE_TORCH,
+    CAM_FLASH_MODE_SINGLE,
+    CAM_FLASH_MODE_MAX
+} cam_flash_mode_t;
+
+// Flash States
+typedef enum {
+    CAM_FLASH_STATE_UNAVAILABLE,
+    CAM_FLASH_STATE_CHARGING,
+    CAM_FLASH_STATE_READY,
+    CAM_FLASH_STATE_FIRED,
+    CAM_FLASH_STATE_PARTIAL,
+    CAM_FLASH_STATE_MAX
+} cam_flash_state_t;
+
+typedef enum {
+    CAM_FLASH_FIRING_LEVEL_0,
+    CAM_FLASH_FIRING_LEVEL_1,
+    CAM_FLASH_FIRING_LEVEL_2,
+    CAM_FLASH_FIRING_LEVEL_3,
+    CAM_FLASH_FIRING_LEVEL_4,
+    CAM_FLASH_FIRING_LEVEL_5,
+    CAM_FLASH_FIRING_LEVEL_6,
+    CAM_FLASH_FIRING_LEVEL_7,
+    CAM_FLASH_FIRING_LEVEL_8,
+    CAM_FLASH_FIRING_LEVEL_9,
+    CAM_FLASH_FIRING_LEVEL_10,
+    CAM_FLASH_FIRING_LEVEL_MAX
+} cam_flash_firing_level_t;
+
+
+typedef enum {
+    CAM_AEC_TRIGGER_IDLE,
+    CAM_AEC_TRIGGER_START,
+    CAM_AEC_TRIGGER_CANCEL
+} cam_aec_trigger_type_t;
+
+typedef enum {
+    CAM_AF_TRIGGER_IDLE,
+    CAM_AF_TRIGGER_START,
+    CAM_AF_TRIGGER_CANCEL
+} cam_af_trigger_type_t;
+
+typedef enum {
+    CAM_AE_STATE_INACTIVE,
+    CAM_AE_STATE_SEARCHING,
+    CAM_AE_STATE_CONVERGED,
+    CAM_AE_STATE_LOCKED,
+    CAM_AE_STATE_FLASH_REQUIRED,
+    CAM_AE_STATE_PRECAPTURE
+} cam_ae_state_t;
+
+typedef enum {
+    CAM_NOISE_REDUCTION_MODE_OFF,
+    CAM_NOISE_REDUCTION_MODE_FAST,
+    CAM_NOISE_REDUCTION_MODE_HIGH_QUALITY,
+    CAM_NOISE_REDUCTION_MODE_MINIMAL,
+    CAM_NOISE_REDUCTION_MODE_ZERO_SHUTTER_LAG
+} cam_noise_reduction_mode_t;
+
+typedef enum {
+    CAM_EDGE_MODE_OFF,
+    CAM_EDGE_MODE_FAST,
+    CAM_EDGE_MODE_HIGH_QUALITY,
+    CAM_EDGE_MODE_ZERO_SHUTTER_LAG,
+} cam_edge_mode_t;
+
+typedef struct {
+   uint8_t edge_mode;
+   int32_t sharpness;
+} cam_edge_application_t;
+
+typedef enum {
+    CAM_BLACK_LEVEL_LOCK_OFF,
+    CAM_BLACK_LEVEL_LOCK_ON,
+} cam_black_level_lock_t;
+
+typedef enum {
+    CAM_HOTPIXEL_MODE_OFF,
+    CAM_HOTPIXEL_MODE_FAST,
+    CAM_HOTPIXEL_MODE_HIGH_QUALITY,
+} cam_hotpixel_mode_t;
+
+typedef enum {
+    CAM_LENS_SHADING_MAP_MODE_OFF,
+    CAM_LENS_SHADING_MAP_MODE_ON,
+} cam_lens_shading_map_mode_t;
+
+typedef enum {
+    CAM_LENS_SHADING_MODE_OFF,
+    CAM_LENS_SHADING_MODE_FAST,
+    CAM_LENS_SHADING_MODE_HIGH_QUALITY,
+} cam_lens_shading_mode_t;
+
+typedef enum {
+    CAM_FACE_DETECT_MODE_OFF,
+    CAM_FACE_DETECT_MODE_SIMPLE,
+    CAM_FACE_DETECT_MODE_FULL,
+} cam_face_detect_mode_t;
+
+typedef enum {
+    CAM_TONEMAP_MODE_CONTRAST_CURVE,
+    CAM_TONEMAP_MODE_FAST,
+    CAM_TONEMAP_MODE_HIGH_QUALITY,
+} cam_tonemap_mode_t;
+
+typedef enum {
+  CAM_CDS_MODE_OFF,
+  CAM_CDS_MODE_ON,
+  CAM_CDS_MODE_AUTO,
+  CAM_CDS_MODE_MAX
+} cam_cds_mode_type_t;
+
+typedef struct  {
+    int32_t left;
+    int32_t top;
+    int32_t width;
+    int32_t height;
+} cam_rect_t;
+
+typedef struct  {
+    cam_rect_t rect;
+    int32_t weight; /* weight of the area, valid for focusing/metering areas */
+} cam_area_t;
+
+typedef enum {
+    CAM_STREAMING_MODE_CONTINUOUS, /* continuous streaming */
+    CAM_STREAMING_MODE_BURST,      /* burst streaming */
+    CAM_STREAMING_MODE_BATCH,      /* stream frames in batches */
+    CAM_STREAMING_MODE_MAX
+} cam_streaming_mode_t;
+
+typedef enum {
+    IS_TYPE_NONE,
+    IS_TYPE_DIS,
+    IS_TYPE_GA_DIS,
+    IS_TYPE_EIS_1_0,
+    IS_TYPE_EIS_2_0,
+    IS_TYPE_MAX
+} cam_is_type_t;
+
+typedef enum {
+    DIS_DISABLE,
+    DIS_ENABLE
+} cam_dis_mode_t;
+
+typedef enum {
+  NON_SECURE,
+  SECURE
+} cam_stream_secure_t;
+
+#define CAM_REPROCESS_MASK_TYPE_WNR (1<<0)
+
+/* event from server */
+typedef enum {
+    CAM_EVENT_TYPE_MAP_UNMAP_DONE  = (1<<0),
+    CAM_EVENT_TYPE_AUTO_FOCUS_DONE = (1<<1),
+    CAM_EVENT_TYPE_ZOOM_DONE       = (1<<2),
+    CAM_EVENT_TYPE_DAEMON_DIED     = (1<<3),
+    CAM_EVENT_TYPE_INT_TAKE_JPEG   = (1<<4),
+    CAM_EVENT_TYPE_INT_TAKE_RAW    = (1<<5),
+    CAM_EVENT_TYPE_DAEMON_PULL_REQ = (1<<6),
+    CAM_EVENT_TYPE_MAX
+} cam_event_type_t;
+
+typedef enum {
+    CAM_EXP_BRACKETING_OFF,
+    CAM_EXP_BRACKETING_ON
+} cam_bracket_mode;
+
+typedef struct {
+    cam_bracket_mode mode;
+    char values[MAX_EXP_BRACKETING_LENGTH];  /* user defined values */
+} cam_exp_bracketing_t;
+
+typedef struct {
+  uint32_t num_frames;
+  cam_exp_bracketing_t exp_val;
+} cam_hdr_bracketing_info_t;
+
+ typedef struct {
+    cam_bracket_mode mode;
+    int32_t values;  /* user defined values */
+} cam_capture_bracketing_t;
+
+typedef enum {
+    CAM_CAPTURE_NORMAL,
+    CAM_CAPTURE_FLASH,
+    CAM_CAPTURE_BRACKETING,
+    CAM_CAPTURE_MAX
+} cam_capture_type;
+
+typedef struct {
+    int32_t num_frames;     /*Num of frames requested on this quality*/
+    cam_capture_type type;  /*type of the capture request*/
+
+    /* union to store values of the capture type */
+    union {
+        cam_flash_mode_t flash_mode;
+        cam_capture_bracketing_t hdr_mode;
+    };
+} cam_capture_settings_t;
+
+typedef struct {
+    uint32_t num_batch;  /*Number of frames batch requested*/
+    cam_capture_settings_t configs[MAX_CAPTURE_BATCH_NUM];
+} cam_capture_frame_config_t;
+
+typedef struct {
+    uint8_t chromatixData[CHROMATIX_SIZE];
+    uint8_t snapchromatixData[CHROMATIX_SIZE];
+    uint8_t common_chromatixData[COMMONCHROMATIX_SIZE];
+    uint8_t cpp_chromatixData[CPPCHROMATIX_SIZE];
+    uint8_t cpp_chromatixSnapData[CPPCHROMATIX_SIZE];
+    uint8_t postproc_chromatixData[SWPOSTPROCCHROMATIX_SIZE];
+} tune_chromatix_t;
+
+typedef struct {
+    uint8_t af_tuneData[AFTUNE_SIZE];
+} tune_autofocus_t;
+
+typedef struct {
+    uint8_t stepsize;
+    uint8_t direction;
+    int32_t num_steps;
+    uint8_t ttype;
+} tune_actuator_t;
+
+typedef struct {
+    uint8_t module;
+    uint8_t type;
+    int32_t value;
+} tune_cmd_t;
+
+typedef enum {
+    CAM_AEC_ROI_OFF,
+    CAM_AEC_ROI_ON
+} cam_aec_roi_ctrl_t;
+
+typedef enum {
+    CAM_AEC_ROI_BY_INDEX,
+    CAM_AEC_ROI_BY_COORDINATE,
+} cam_aec_roi_type_t;
+
+typedef struct {
+    uint32_t x;
+    uint32_t y;
+} cam_coordinate_type_t;
+
+typedef struct {
+    int32_t numerator;
+    int32_t denominator;
+} cam_rational_type_t;
+
+typedef struct {
+    cam_aec_roi_ctrl_t aec_roi_enable;
+    cam_aec_roi_type_t aec_roi_type;
+    union {
+        cam_coordinate_type_t coordinate[MAX_ROI];
+        uint32_t aec_roi_idx[MAX_ROI];
+    } cam_aec_roi_position;
+} cam_set_aec_roi_t;
+
+typedef struct {
+    uint32_t frm_id;
+    uint8_t num_roi;
+    cam_rect_t roi[MAX_ROI];
+    int32_t weight[MAX_ROI];
+    uint8_t is_multiwindow;
+} cam_roi_info_t;
+
+typedef enum {
+    CAM_WAVELET_DENOISE_YCBCR_PLANE,
+    CAM_WAVELET_DENOISE_CBCR_ONLY,
+    CAM_WAVELET_DENOISE_STREAMLINE_YCBCR,
+    CAM_WAVELET_DENOISE_STREAMLINED_CBCR
+} cam_denoise_process_type_t;
+
+typedef struct {
+    uint8_t denoise_enable;
+    cam_denoise_process_type_t process_plates;
+} cam_denoise_param_t;
+
+#define CAM_FACE_PROCESS_MASK_DETECTION    (1U<<0)
+#define CAM_FACE_PROCESS_MASK_RECOGNITION  (1U<<1)
+typedef struct {
+    uint32_t fd_mode;          /* mask of face process */
+    uint32_t num_fd;
+} cam_fd_set_parm_t;
+
+typedef enum {
+    QCAMERA_FD_PREVIEW,
+    QCAMERA_FD_SNAPSHOT
+}qcamera_face_detect_type_t;
+
+typedef enum {
+    CAM_FACE_CT_POINT_EYE_L_PUPIL,
+    CAM_FACE_CT_POINT_EYE_L_IN,
+    CAM_FACE_CT_POINT_EYE_L_OUT,
+    CAM_FACE_CT_POINT_EYE_L_UP,
+    CAM_FACE_CT_POINT_EYE_L_DOWN,
+    CAM_FACE_CT_POINT_EYE_R_PUPIL,
+    CAM_FACE_CT_POINT_EYE_R_IN,
+    CAM_FACE_CT_POINT_EYE_R_OUT,
+    CAM_FACE_CT_POINT_EYE_R_UP,
+    CAM_FACE_CT_POINT_EYE_R_DOWN,
+    CAM_FACE_CT_POINT_EYE_MAX
+} cam_face_ct_point_eye_t;
+
+typedef enum {
+    CAM_FACE_CT_POINT_FOREHEAD,
+    CAM_FACE_CT_POINT_FOREHEAD_MAX
+} cam_face_ct_point_forh_t;
+
+typedef enum {
+    CAM_FACE_CT_POINT_NOSE,
+    CAM_FACE_CT_POINT_NOSE_TIP,
+    CAM_FACE_CT_POINT_NOSE_L,
+    CAM_FACE_CT_POINT_NOSE_R,
+    CAM_FACE_CT_POINT_NOSE_L_0,
+    CAM_FACE_CT_POINT_NOSE_R_0,
+    CAM_FACE_CT_POINT_NOSE_L_1,
+    CAM_FACE_CT_POINT_NOSE_R_1,
+    CAM_FACE_CT_POINT_NOSE_MAX
+} cam_face_ct_point_nose_t;
+
+typedef enum {
+    CAM_FACE_CT_POINT_MOUTH_L,
+    CAM_FACE_CT_POINT_MOUTH_R,
+    CAM_FACE_CT_POINT_MOUTH_UP,
+    CAM_FACE_CT_POINT_MOUTH_DOWN,
+    CAM_FACE_CT_POINT_MOUTH_MAX
+} cam_face_ct_point_mouth_t;
+
+typedef enum {
+    CAM_FACE_CT_POINT_LIP_UP,
+    CAM_FACE_CT_POINT_LIP_DOWN,
+    CAM_FACE_CT_POINT_LIP_MAX
+} cam_face_ct_point_lip_t;
+
+typedef enum {
+    CAM_FACE_CT_POINT_BROW_L_UP,
+    CAM_FACE_CT_POINT_BROW_L_DOWN,
+    CAM_FACE_CT_POINT_BROW_L_IN,
+    CAM_FACE_CT_POINT_BROW_L_OUT,
+    CAM_FACE_CT_POINT_BROW_R_UP,
+    CAM_FACE_CT_POINT_BROW_R_DOWN,
+    CAM_FACE_CT_POINT_BROW_R_IN,
+    CAM_FACE_CT_POINT_BROW_R_OUT,
+    CAM_FACE_CT_POINT_BROW_MAX
+} cam_face_ct_point_brow_t;
+
+typedef enum {
+    CAM_FACE_CT_POINT_CHIN,
+    CAM_FACE_CT_POINT_CHIN_L,
+    CAM_FACE_CT_POINT_CHIN_R,
+    CAM_FACE_CT_POINT_CHIN_MAX
+} cam_face_ct_point_chin_t;
+
+typedef enum {
+    CAM_FACE_CT_POINT_EAR_L_DOWN,
+    CAM_FACE_CT_POINT_EAR_R_DOWN,
+    CAM_FACE_CT_POINT_EAR_L_UP,
+    CAM_FACE_CT_POINT_EAR_R_UP,
+    CAM_FACE_CT_POINT_EAR_MAX
+} cam_face_ct_point_ear_t;
+
+typedef struct {
+  uint8_t is_eye_valid;
+  cam_coordinate_type_t contour_eye_pt[CAM_FACE_CT_POINT_EYE_MAX];
+  uint8_t is_forehead_valid;
+  cam_coordinate_type_t contour_forh_pt[CAM_FACE_CT_POINT_FOREHEAD_MAX];
+  uint8_t is_nose_valid;
+  cam_coordinate_type_t contour_nose_pt[CAM_FACE_CT_POINT_NOSE_MAX];
+  uint8_t is_mouth_valid;
+  cam_coordinate_type_t contour_mouth_pt[CAM_FACE_CT_POINT_MOUTH_MAX];
+  uint8_t is_lip_valid;
+  cam_coordinate_type_t contour_lip_pt[CAM_FACE_CT_POINT_LIP_MAX];
+  uint8_t is_brow_valid;
+  cam_coordinate_type_t contour_brow_pt[CAM_FACE_CT_POINT_BROW_MAX];
+  uint8_t is_chin_valid;
+  cam_coordinate_type_t contour_chin_pt[CAM_FACE_CT_POINT_CHIN_MAX];
+  uint8_t is_ear_valid;
+  cam_coordinate_type_t contour_ear_pt[CAM_FACE_CT_POINT_EAR_MAX];
+} cam_face_detect_contour_t;
+
+typedef struct {
+    int32_t face_id;            /* unique id for face tracking within view unless view changes */
+    int8_t score;              /* score of confidence (0, -100) */
+    cam_rect_t face_boundary;  /* boundary of face detected */
+    cam_coordinate_type_t left_eye_center;  /* coordinate of center of left eye */
+    cam_coordinate_type_t right_eye_center; /* coordinate of center of right eye */
+    cam_coordinate_type_t mouth_center;     /* coordinate of center of mouth */
+    cam_face_detect_contour_t contour_info; /* face detection contour info */
+    uint8_t smile_degree;      /* smile degree (0, -100) */
+    uint8_t smile_confidence;  /* smile confidence (0, 100) */
+    uint8_t face_recognised;   /* if face is recognised */
+    int8_t gaze_angle;         /* -90 -45 0 45 90 for head left to right tilt */
+    int8_t updown_dir;         /* up down direction (-90, 90) */
+    int8_t leftright_dir;      /* left right direction (-90, 90) */
+    int8_t roll_dir;           /* roll direction (-90, 90) */
+    int8_t left_right_gaze;    /* left right gaze degree (-50, 50) */
+    int8_t top_bottom_gaze;    /* up down gaze degree (-50, 50) */
+    uint8_t blink_detected;    /* if blink is detected */
+    uint8_t left_blink;        /* left eye blink degree (0, -100) */
+    uint8_t right_blink;       /* right eye blink degree (0, -100) */
+} cam_face_detection_info_t;
+
+typedef struct {
+    uint32_t frame_id;                         /* frame index of which faces are detected */
+    uint8_t num_faces_detected;                /* number of faces detected */
+    cam_face_detection_info_t faces[MAX_ROI];  /* detailed information of faces detected */
+    qcamera_face_detect_type_t fd_type;        /* face detect for preview or snapshot frame*/
+    cam_dimension_t fd_frame_dim;              /* frame dims on which fd is applied */
+} cam_face_detection_data_t;
+
+#define CAM_HISTOGRAM_STATS_SIZE 256
+typedef struct {
+    uint32_t max_hist_value;
+    uint32_t hist_buf[CAM_HISTOGRAM_STATS_SIZE]; /* buf holding histogram stats data */
+} cam_histogram_data_t;
+
+typedef struct {
+    cam_histogram_data_t r_stats;
+    cam_histogram_data_t b_stats;
+    cam_histogram_data_t gr_stats;
+    cam_histogram_data_t gb_stats;
+} cam_bayer_hist_stats_t;
+
+typedef enum {
+    CAM_HISTOGRAM_TYPE_BAYER,
+    CAM_HISTOGRAM_TYPE_YUV
+} cam_histogram_type_t;
+
+typedef struct {
+    cam_histogram_type_t type;
+    union {
+        cam_bayer_hist_stats_t bayer_stats;
+        cam_histogram_data_t yuv_stats;
+    };
+} cam_hist_stats_t;
+
+enum cam_focus_distance_index{
+  CAM_FOCUS_DISTANCE_NEAR_INDEX,  /* 0 */
+  CAM_FOCUS_DISTANCE_OPTIMAL_INDEX,
+  CAM_FOCUS_DISTANCE_FAR_INDEX,
+  CAM_FOCUS_DISTANCE_MAX_INDEX
+};
+
+typedef struct {
+  float focus_distance[CAM_FOCUS_DISTANCE_MAX_INDEX];
+} cam_focus_distances_info_t;
+
+typedef struct {
+    uint32_t scale;
+    float diopter;
+} cam_focus_pos_info_t ;
+
+/* Different autofocus cycle when calling do_autoFocus
+ * CAM_AF_COMPLETE_EXISTING_SWEEP: Complete existing sweep
+ * if one is ongoing, and lock.
+ * CAM_AF_DO_ONE_FULL_SWEEP: Do one full sweep, regardless
+ * of the current state, and lock.
+ * CAM_AF_START_CONTINUOUS_SWEEP: Start continuous sweep.
+ * After do_autoFocus, HAL receives an event: CAM_AF_FOCUSED,
+ * or CAM_AF_NOT_FOCUSED.
+ * cancel_autoFocus stops any lens movement.
+ * Each do_autoFocus call only produces 1 FOCUSED/NOT_FOCUSED
+ * event, not both.
+ */
+typedef enum {
+    CAM_AF_COMPLETE_EXISTING_SWEEP,
+    CAM_AF_DO_ONE_FULL_SWEEP,
+    CAM_AF_START_CONTINUOUS_SWEEP
+} cam_autofocus_cycle_t;
+
+typedef enum {
+    CAM_AF_SCANNING,
+    CAM_AF_FOCUSED,
+    CAM_AF_NOT_FOCUSED,
+    CAM_AF_INACTIVE
+} cam_autofocus_state_t;
+
+typedef struct {
+    cam_autofocus_state_t focus_state;           /* state of focus */
+    cam_focus_distances_info_t focus_dist;       /* focus distance */
+    cam_focus_mode_type focus_mode;        /* focus mode from backend */
+    uint32_t focused_frame_idx;
+    int32_t focus_pos;
+} cam_auto_focus_data_t;
+
+typedef struct {
+  uint32_t is_hdr_scene;
+  float    hdr_confidence;
+} cam_asd_hdr_scene_data_t;
+
+typedef struct {
+    uint32_t stream_id;
+    cam_rect_t crop;
+    cam_rect_t roi_map;
+} cam_stream_crop_info_t;
+
+typedef struct {
+    uint8_t num_of_streams;
+    cam_stream_crop_info_t crop_info[MAX_NUM_STREAMS];
+} cam_crop_data_t;
+
+typedef struct {
+    uint32_t stream_id;
+    uint32_t cds_enable;
+} cam_stream_cds_info_t;
+
+typedef struct {
+    uint8_t session_cds_enable;
+    uint8_t num_of_streams;
+    cam_stream_cds_info_t cds_info[MAX_NUM_STREAMS];
+} cam_cds_data_t;
+
+typedef enum {
+    DO_NOT_NEED_FUTURE_FRAME,
+    NEED_FUTURE_FRAME,
+} cam_prep_snapshot_state_t;
+
+#define CC_GAINS_COUNT  4
+
+typedef struct {
+    float gains[CC_GAINS_COUNT];
+} cam_color_correct_gains_t;
+
+typedef struct {
+    // If LED is ON and Burst Num > 1, this is first LED ON frame
+    uint32_t min_frame_idx;
+    // If LED is ON and Burst Num > 1, this is first LED Off frame after ON
+    uint32_t max_frame_idx;
+    // Used only when LED Is ON and burst num > 1
+    uint32_t num_led_on_frames;
+    // Skip count after LED is turned OFF
+    uint32_t frame_skip_count;
+    // Batch id for each picture request
+    uint32_t config_batch_idx;
+} cam_frame_idx_range_t;
+
+typedef enum {
+  S_NORMAL = 0,
+  S_SCENERY,
+  S_PORTRAIT,
+  S_PORTRAIT_BACKLIGHT,
+  S_SCENERY_BACKLIGHT,
+  S_BACKLIGHT,
+  S_MAX,
+} cam_auto_scene_t;
+
+typedef struct {
+   uint32_t meta_frame_id;
+} cam_meta_valid_t;
+
+typedef enum {
+    CAM_SENSOR_RAW,
+    CAM_SENSOR_YUV
+} cam_sensor_t;
+
+typedef struct {
+    cam_flash_mode_t flash_mode;
+    float            aperture_value;
+    cam_flash_state_t        flash_state;
+    float            focal_length;
+    float            f_number;
+    int32_t          sensing_method;
+    float            crop_factor;
+    cam_sensor_t sens_type;
+} cam_sensor_params_t;
+
+typedef enum {
+    CAM_METERING_MODE_UNKNOWN = 0,
+    CAM_METERING_MODE_AVERAGE = 1,
+    CAM_METERING_MODE_CENTER_WEIGHTED_AVERAGE = 2,
+    CAM_METERING_MODE_SPOT = 3,
+    CAM_METERING_MODE_MULTI_SPOT = 4,
+    CAM_METERING_MODE_PATTERN = 5,
+    CAM_METERING_MODE_PARTIAL = 6,
+    CAM_METERING_MODE_OTHER = 255,
+} cam_metering_mode_t;
+
+typedef struct {
+    float exp_time;
+    int32_t iso_value;
+    uint32_t flash_needed;
+    uint32_t settled;
+    cam_wb_mode_type wb_mode;
+    uint32_t metering_mode;
+    uint32_t exposure_program;
+    uint32_t exposure_mode;
+    uint32_t scenetype;
+    float brightness;
+} cam_3a_params_t;
+
+typedef struct {
+    uint64_t sw_version_number;
+    int32_t aec_debug_data_size;
+    char aec_private_debug_data[AEC_DEBUG_DATA_SIZE];
+} cam_ae_exif_debug_t;
+
+typedef struct {
+    int32_t cct_value;
+    cam_awb_gain_t rgb_gains;
+} cam_awb_params_t;
+
+typedef struct {
+    int32_t awb_debug_data_size;
+    char awb_private_debug_data[AWB_DEBUG_DATA_SIZE];
+} cam_awb_exif_debug_t;
+
+typedef struct {
+    int32_t af_debug_data_size;
+    int32_t haf_debug_data_size;
+    int32_t tof_debug_data_size;
+    int32_t dciaf_debug_data_size;
+    int32_t pdaf_debug_data_size;
+    char af_private_debug_data[AF_DEBUG_DATA_SIZE];
+} cam_af_exif_debug_t;
+
+typedef struct {
+    int32_t asd_debug_data_size;
+    char asd_private_debug_data[ASD_DEBUG_DATA_SIZE];
+} cam_asd_exif_debug_t;
+
+typedef struct {
+    int32_t bg_stats_buffer_size;
+    int32_t bhist_stats_buffer_size;
+    int32_t bg_config_buffer_size;
+    char stats_buffer_private_debug_data[STATS_BUFFER_DEBUG_DATA_SIZE];
+} cam_stats_buffer_exif_debug_t;
+
+/* 3A version*/
+typedef struct {
+    uint16_t major_version;
+    uint16_t minor_version;
+    uint16_t patch_version;
+    uint16_t new_feature_des;
+} cam_q3a_version_t;
+
+typedef struct {
+    uint32_t tuning_data_version;
+    size_t tuning_sensor_data_size;
+    size_t tuning_vfe_data_size;
+    size_t tuning_cpp_data_size;
+    size_t tuning_cac_data_size;
+    size_t tuning_cac_data_size2;
+    size_t tuning_mod3_data_size;
+    uint8_t  data[TUNING_DATA_MAX];
+}tuning_params_t;
+
+typedef struct {
+    int32_t event_type;
+    cam_dimension_t dim;
+    size_t size;
+    char path[QCAMERA_MAX_FILEPATH_LENGTH];
+} cam_int_evt_params_t;
+
+typedef struct {
+  uint8_t private_isp_data[MAX_ISP_DATA_SIZE];
+} cam_chromatix_lite_isp_t;
+
+typedef struct {
+  uint8_t private_pp_data[MAX_PP_DATA_SIZE];
+} cam_chromatix_lite_pp_t;
+
+typedef struct {
+  uint8_t private_stats_data[MAX_AE_STATS_DATA_SIZE];
+} cam_chromatix_lite_ae_stats_t;
+
+typedef struct {
+  uint8_t private_stats_data[MAX_AWB_STATS_DATA_SIZE];
+} cam_chromatix_lite_awb_stats_t;
+
+typedef struct {
+  uint8_t private_stats_data[MAX_AF_STATS_DATA_SIZE];
+} cam_chromatix_lite_af_stats_t;
+
+typedef struct {
+  uint8_t private_stats_data[MAX_ASD_STATS_DATA_SIZE];
+} cam_chromatix_lite_asd_stats_t;
+
+typedef struct {
+   uint32_t min_buffers;
+   uint32_t max_buffers;
+} cam_buffer_info_t;
+
+typedef struct {
+    cam_dimension_t stream_sizes[MAX_NUM_STREAMS];
+    uint32_t num_streams;
+    cam_stream_type_t type[MAX_NUM_STREAMS];
+    uint32_t postprocess_mask[MAX_NUM_STREAMS];
+    cam_buffer_info_t buffer_info;
+    cam_is_type_t is_type;
+} cam_stream_size_info_t;
+
+typedef struct {
+    uint32_t num_streams;
+    uint32_t streamID[MAX_NUM_STREAMS];
+} cam_stream_ID_t;
+
+typedef struct {
+    uint32_t frame_id;
+    uint32_t num_streams;
+    uint32_t stream_id[MAX_NUM_STREAMS];
+} cam_buf_divert_info_t;
+
+typedef  struct {
+    uint8_t is_stats_valid;               /* if histogram data is valid */
+    cam_hist_stats_t stats_data;          /* histogram data */
+
+    uint8_t is_faces_valid;               /* if face detection data is valid */
+    cam_face_detection_data_t faces_data; /* face detection result */
+
+    uint8_t is_focus_valid;               /* if focus data is valid */
+    cam_auto_focus_data_t focus_data;     /* focus data */
+
+    uint8_t is_crop_valid;                /* if crop data is valid */
+    cam_crop_data_t crop_data;            /* crop data */
+
+    uint8_t is_prep_snapshot_done_valid;  /* if prep snapshot done is valid */
+    cam_prep_snapshot_state_t prep_snapshot_done_state;  /* prepare snapshot done state */
+
+    /* if good frame idx range is valid */
+    uint8_t is_good_frame_idx_range_valid;
+    /* good frame idx range, make sure:
+     * 1. good_frame_idx_range.min_frame_idx > current_frame_idx
+     * 2. good_frame_idx_range.min_frame_idx - current_frame_idx < 100 */
+    cam_frame_idx_range_t good_frame_idx_range;
+
+    uint32_t is_hdr_scene_data_valid;
+    cam_asd_hdr_scene_data_t hdr_scene_data;
+    uint8_t is_asd_decision_valid;
+    cam_auto_scene_t scene; //scene type as decided by ASD
+
+    char private_metadata[MAX_METADATA_PRIVATE_PAYLOAD_SIZE_IN_BYTES];
+
+    /* AE parameters */
+    uint8_t is_3a_params_valid;
+    cam_3a_params_t cam_3a_params;
+
+    /* AE exif debug parameters */
+    uint8_t is_ae_exif_debug_valid;
+    cam_ae_exif_debug_t ae_exif_debug_params;
+
+    /* AWB exif debug parameters */
+    uint8_t is_awb_exif_debug_valid;
+    cam_awb_exif_debug_t awb_exif_debug_params;
+
+    /* AF exif debug parameters */
+    uint8_t is_af_exif_debug_valid;
+    cam_af_exif_debug_t af_exif_debug_params;
+
+    /* ASD exif debug parameters */
+    uint8_t is_asd_exif_debug_valid;
+    cam_asd_exif_debug_t asd_exif_debug_params;
+
+    /* Stats buffer exif debug parameters */
+    uint8_t is_stats_buffer_exif_debug_valid;
+    cam_stats_buffer_exif_debug_t stats_buffer_exif_debug_params;
+
+    /* AWB parameters */
+    uint8_t is_awb_params_valid;
+    cam_awb_params_t awb_params;
+
+    /* sensor parameters */
+    uint8_t is_sensor_params_valid;
+    cam_sensor_params_t sensor_params;
+
+    /* Meta valid params */
+    uint8_t is_meta_valid;
+    cam_meta_valid_t meta_valid_params;
+
+    /*Tuning Data*/
+    uint8_t is_tuning_params_valid;
+    tuning_params_t tuning_params;
+
+    uint8_t is_chromatix_lite_isp_valid;
+    cam_chromatix_lite_isp_t chromatix_lite_isp_data;
+
+    uint8_t is_chromatix_lite_pp_valid;
+    cam_chromatix_lite_pp_t chromatix_lite_pp_data;
+
+    uint8_t is_chromatix_lite_ae_stats_valid;
+    cam_chromatix_lite_ae_stats_t chromatix_lite_ae_stats_data;
+
+    uint8_t is_chromatix_lite_awb_stats_valid;
+    cam_chromatix_lite_awb_stats_t chromatix_lite_awb_stats_data;
+
+    uint8_t is_chromatix_lite_af_stats_valid;
+    cam_chromatix_lite_af_stats_t chromatix_lite_af_stats_data;
+} cam_metadata_info_t;
+
+typedef enum {
+    CAM_INTF_PARM_HAL_VERSION,
+
+    /* Overall mode of 3A control routines. We need to have this parameter
+     * because not all android.control.* have an OFF option, for example,
+     * AE_FPS_Range, aePrecaptureTrigger */
+    CAM_INTF_META_MODE,
+    /* Whether AE is currently updating the sensor exposure and sensitivity
+     * fields */
+    CAM_INTF_META_AEC_MODE,
+    CAM_INTF_PARM_WHITE_BALANCE,
+    CAM_INTF_PARM_FOCUS_MODE,
+
+    /* common between HAL1 and HAL3 */
+    CAM_INTF_PARM_ANTIBANDING,
+    CAM_INTF_PARM_EXPOSURE_COMPENSATION,
+    CAM_INTF_PARM_EV_STEP,
+    CAM_INTF_PARM_AEC_LOCK,
+    CAM_INTF_PARM_FPS_RANGE,
+    CAM_INTF_PARM_AWB_LOCK, /* 10 */
+    CAM_INTF_PARM_EFFECT,
+    CAM_INTF_PARM_BESTSHOT_MODE,
+    CAM_INTF_PARM_DIS_ENABLE,
+    CAM_INTF_PARM_LED_MODE,
+    CAM_INTF_META_HISTOGRAM,
+    CAM_INTF_META_FACE_DETECTION,
+    /* Whether optical image stabilization is enabled. */
+    CAM_INTF_META_LENS_OPT_STAB_MODE,
+    /* specific to HAl1 */
+    CAM_INTF_META_AUTOFOCUS_DATA,
+    CAM_INTF_PARM_QUERY_FLASH4SNAP,
+    CAM_INTF_PARM_EXPOSURE, /* 20 */
+    CAM_INTF_PARM_SHARPNESS,
+    CAM_INTF_PARM_CONTRAST,
+    CAM_INTF_PARM_SATURATION,
+    CAM_INTF_PARM_BRIGHTNESS,
+    CAM_INTF_PARM_ISO,
+    CAM_INTF_PARM_ZOOM,
+    CAM_INTF_PARM_ROLLOFF,
+    CAM_INTF_PARM_MODE,             /* camera mode */
+    CAM_INTF_PARM_AEC_ALGO_TYPE,    /* auto exposure algorithm */
+    CAM_INTF_PARM_FOCUS_ALGO_TYPE, /* 30 */ /* focus algorithm */
+    CAM_INTF_PARM_AEC_ROI,
+    CAM_INTF_PARM_AF_ROI,
+    CAM_INTF_PARM_SCE_FACTOR,
+    CAM_INTF_PARM_FD,
+    CAM_INTF_PARM_MCE,
+    CAM_INTF_PARM_HFR,
+    CAM_INTF_PARM_REDEYE_REDUCTION,
+    CAM_INTF_PARM_WAVELET_DENOISE,
+    CAM_INTF_PARM_TEMPORAL_DENOISE,
+    CAM_INTF_PARM_HISTOGRAM, /* 40 */
+    CAM_INTF_PARM_ASD_ENABLE,
+    CAM_INTF_PARM_RECORDING_HINT,
+    CAM_INTF_PARM_HDR,
+    CAM_INTF_PARM_MAX_DIMENSION,
+    CAM_INTF_PARM_RAW_DIMENSION,
+    CAM_INTF_PARM_FRAMESKIP,
+    CAM_INTF_PARM_ZSL_MODE,  /* indicating if it's running in ZSL mode */
+    CAM_INTF_PARM_BURST_NUM,
+    CAM_INTF_PARM_RETRO_BURST_NUM,
+    CAM_INTF_PARM_BURST_LED_ON_PERIOD, /* 50 */
+    CAM_INTF_PARM_HDR_NEED_1X, /* if HDR needs 1x output */
+    CAM_INTF_PARM_LOCK_CAF,
+    CAM_INTF_PARM_VIDEO_HDR,
+    CAM_INTF_PARM_SENSOR_HDR,
+    CAM_INTF_PARM_ROTATION,
+    CAM_INTF_PARM_SCALE,
+    CAM_INTF_PARM_VT, /* indicating if it's a Video Call Application */
+    CAM_INTF_META_CROP_DATA,
+    CAM_INTF_META_PREP_SNAPSHOT_DONE,
+    CAM_INTF_META_GOOD_FRAME_IDX_RANGE, /* 60 */
+    CAM_INTF_META_ASD_HDR_SCENE_DATA,
+    CAM_INTF_META_ASD_SCENE_TYPE,
+    CAM_INTF_META_CURRENT_SCENE,
+    CAM_INTF_META_AEC_INFO,
+    CAM_INTF_META_SENSOR_INFO,
+    CAM_INTF_META_ASD_SCENE_CAPTURE_TYPE,
+    CAM_INTF_META_CHROMATIX_LITE_ISP,
+    CAM_INTF_META_CHROMATIX_LITE_PP,
+    CAM_INTF_META_CHROMATIX_LITE_AE,
+    CAM_INTF_META_CHROMATIX_LITE_AWB, /* 70 */
+    CAM_INTF_META_CHROMATIX_LITE_AF,
+    CAM_INTF_META_CHROMATIX_LITE_ASD,
+    CAM_INTF_META_EXIF_DEBUG_AE,
+    CAM_INTF_META_EXIF_DEBUG_AWB,
+    CAM_INTF_META_EXIF_DEBUG_AF,
+    CAM_INTF_META_EXIF_DEBUG_ASD,
+    CAM_INTF_META_EXIF_DEBUG_STATS,
+    CAM_INTF_PARM_GET_CHROMATIX,
+    CAM_INTF_PARM_SET_RELOAD_CHROMATIX,
+    CAM_INTF_PARM_SET_AUTOFOCUSTUNING, /* 80 */
+    CAM_INTF_PARM_GET_AFTUNE,
+    CAM_INTF_PARM_SET_RELOAD_AFTUNE,
+    CAM_INTF_PARM_SET_VFE_COMMAND,
+    CAM_INTF_PARM_SET_PP_COMMAND,
+    CAM_INTF_PARM_TINTLESS,
+    CAM_INTF_PARM_LONGSHOT_ENABLE,
+    CAM_INTF_PARM_RDI_MODE,
+    CAM_INTF_PARM_CDS_MODE,
+    CAM_INTF_PARM_TONE_MAP_MODE,
+    CAM_INTF_BUF_DIVERT_INFO,
+    CAM_INTF_PARM_CAPTURE_FRAME_CONFIG,
+
+    /* stream based parameters */
+    CAM_INTF_PARM_DO_REPROCESS,
+    CAM_INTF_PARM_SET_BUNDLE, /* 90 */
+    CAM_INTF_PARM_STREAM_FLIP,
+    CAM_INTF_PARM_GET_OUTPUT_CROP,
+
+    CAM_INTF_PARM_EZTUNE_CMD,
+    CAM_INTF_PARM_INT_EVT,
+
+    /* specific to HAL3 */
+    /* Whether the metadata maps to a valid frame number */
+    CAM_INTF_META_FRAME_NUMBER_VALID,
+    /* Whether the urgent metadata maps to a valid frame number */
+    CAM_INTF_META_URGENT_FRAME_NUMBER_VALID,
+    /* Whether the stream buffer corresponding this frame is dropped or not */
+    CAM_INTF_META_FRAME_DROPPED,
+    /* COLOR CORRECTION.*/
+    CAM_INTF_META_COLOR_CORRECT_MODE,
+    /* A transform matrix to chromatically adapt pixels in the CIE XYZ (1931)
+     * color space from the scene illuminant to the sRGB-standard D65-illuminant. */
+    CAM_INTF_META_COLOR_CORRECT_TRANSFORM,
+    /*Color channel gains in the Bayer raw domain in the order [RGeGoB]*/
+    CAM_INTF_META_COLOR_CORRECT_GAINS, /* 100 */
+    /*The best fit color transform matrix calculated by the stats*/
+    CAM_INTF_META_PRED_COLOR_CORRECT_TRANSFORM,
+    /*The best fit color channels gains calculated by the stats*/
+    CAM_INTF_META_PRED_COLOR_CORRECT_GAINS,
+    /* CONTROL */
+    /* A frame counter set by the framework. Must be maintained unchanged in
+     * output frame. */
+    CAM_INTF_META_FRAME_NUMBER,
+    /* A frame counter set by the framework. Must be maintained unchanged in
+     * output frame. */
+    CAM_INTF_META_URGENT_FRAME_NUMBER,
+    /*Number of streams and size of streams in current configuration*/
+    CAM_INTF_META_STREAM_INFO,
+    /* List of areas to use for metering */
+    CAM_INTF_META_AEC_ROI,
+    /* Whether the HAL must trigger precapture metering.*/
+    CAM_INTF_META_AEC_PRECAPTURE_TRIGGER,
+    /* The ID sent with the latest CAMERA2_TRIGGER_PRECAPTURE_METERING call */
+    /* Current state of AE algorithm */
+    CAM_INTF_META_AEC_STATE,
+    /* List of areas to use for focus estimation */
+    CAM_INTF_META_AF_ROI,
+    /* Whether the HAL must trigger autofocus. */
+    CAM_INTF_META_AF_TRIGGER, /* 110 */
+    /* Current state of AF algorithm */
+    CAM_INTF_META_AF_STATE,
+    /* List of areas to use for illuminant estimation */
+    CAM_INTF_META_AWB_REGIONS,
+    /* Current state of AWB algorithm */
+    CAM_INTF_META_AWB_STATE,
+    /*Whether black level compensation is frozen or free to vary*/
+    CAM_INTF_META_BLACK_LEVEL_LOCK,
+    /* Information to 3A routines about the purpose of this capture, to help
+     * decide optimal 3A strategy */
+    CAM_INTF_META_CAPTURE_INTENT,
+    /* DEMOSAIC */
+    /* Controls the quality of the demosaicing processing */
+    CAM_INTF_META_DEMOSAIC,
+    /* EDGE */
+    /* Operation mode for edge enhancement */
+    CAM_INTF_META_EDGE_MODE,
+    /* Control the amount of edge enhancement applied to the images.*/
+    /* 1-10; 10 is maximum sharpening */
+    CAM_INTF_META_SHARPNESS_STRENGTH,
+    /* FLASH */
+    /* Power for flash firing/torch, 10 is max power; 0 is no flash. Linear */
+    CAM_INTF_META_FLASH_POWER,
+    /* Firing time of flash relative to start of exposure, in nanoseconds*/
+    CAM_INTF_META_FLASH_FIRING_TIME, /* 120 */
+    /* Current state of the flash unit */
+    CAM_INTF_META_FLASH_STATE,
+    /* GEOMETRIC */
+    /* Operating mode of geometric correction */
+    CAM_INTF_META_GEOMETRIC_MODE,
+    /* Control the amount of shading correction applied to the images */
+    CAM_INTF_META_GEOMETRIC_STRENGTH,
+    /* HOT PIXEL */
+    /* Set operational mode for hot pixel correction */
+    CAM_INTF_META_HOTPIXEL_MODE,
+    /* LENS */
+    /* Size of the lens aperture */
+    CAM_INTF_META_LENS_APERTURE,
+    /* State of lens neutral density filter(s) */
+    CAM_INTF_META_LENS_FILTERDENSITY,
+    /* Lens optical zoom setting */
+    CAM_INTF_META_LENS_FOCAL_LENGTH,
+    /* Distance to plane of sharpest focus, measured from frontmost surface
+     * of the lens */
+    CAM_INTF_META_LENS_FOCUS_DISTANCE,
+    /* The range of scene distances that are in sharp focus (depth of field) */
+    CAM_INTF_META_LENS_FOCUS_RANGE,
+    /*Whether the hal needs to output the lens shading map*/
+    CAM_INTF_META_LENS_SHADING_MAP_MODE, /* 130 */
+    /* Current lens status */
+    CAM_INTF_META_LENS_STATE,
+    /* NOISE REDUCTION */
+    /* Mode of operation for the noise reduction algorithm */
+    CAM_INTF_META_NOISE_REDUCTION_MODE,
+   /* Control the amount of noise reduction applied to the images.
+    * 1-10; 10 is max noise reduction */
+    CAM_INTF_META_NOISE_REDUCTION_STRENGTH,
+    /* SCALER */
+    /* Top-left corner and width of the output region to select from the active
+     * pixel array */
+    CAM_INTF_META_SCALER_CROP_REGION,
+    /* The estimated scene illumination lighting frequency */
+    CAM_INTF_META_SCENE_FLICKER,
+    /* SENSOR */
+    /* Duration each pixel is exposed to light, in nanoseconds */
+    CAM_INTF_META_SENSOR_EXPOSURE_TIME,
+    /* Duration from start of frame exposure to start of next frame exposure,
+     * in nanoseconds */
+    CAM_INTF_META_SENSOR_FRAME_DURATION,
+    /* Gain applied to image data. Must be implemented through analog gain only
+     * if set to values below 'maximum analog sensitivity'. */
+    CAM_INTF_META_SENSOR_SENSITIVITY,
+    /* Time at start of exposure of first row */
+    CAM_INTF_META_SENSOR_TIMESTAMP,
+    /* Duration b/w start of first row exposure and the start of last
+       row exposure in nanoseconds */
+    CAM_INTF_META_SENSOR_ROLLING_SHUTTER_SKEW, /* 140 */
+    /* SHADING */
+    /* Quality of lens shading correction applied to the image data */
+    CAM_INTF_META_SHADING_MODE,
+    /* Control the amount of shading correction applied to the images.
+     * unitless: 1-10; 10 is full shading compensation */
+    CAM_INTF_META_SHADING_STRENGTH,
+    /* STATISTICS */
+    /* State of the face detector unit */
+    CAM_INTF_META_STATS_FACEDETECT_MODE,
+    /* Operating mode for histogram generation */
+    CAM_INTF_META_STATS_HISTOGRAM_MODE,
+    /* Operating mode for sharpness map generation */
+    CAM_INTF_META_STATS_SHARPNESS_MAP_MODE,
+    /* A 3-channel sharpness map, based on the raw sensor data,
+     * If only a monochrome sharpness map is supported, all channels
+     * should have the same data
+     */
+    CAM_INTF_META_STATS_SHARPNESS_MAP,
+
+    /* TONEMAP */
+    /* Tone map mode */
+    CAM_INTF_META_TONEMAP_MODE,
+    /* Table mapping RGB input values to output values */
+    CAM_INTF_META_TONEMAP_CURVES,
+
+    CAM_INTF_META_FLASH_MODE,
+    /* 2D array of gain factors for each color channel that was used to
+     * compensate for lens shading for this frame */
+    CAM_INTF_META_LENS_SHADING_MAP, /* 150 */
+    CAM_INTF_META_PRIVATE_DATA,
+    CAM_INTF_PARM_STATS_DEBUG_MASK,
+    CAM_INTF_PARM_STATS_AF_PAAF,
+    /* Indicates streams ID of all the requested buffers */
+    CAM_INTF_META_STREAM_ID,
+    CAM_INTF_PARM_FOCUS_BRACKETING,
+    CAM_INTF_PARM_FLASH_BRACKETING,
+    CAM_INTF_PARM_GET_IMG_PROP,
+    CAM_INTF_META_JPEG_GPS_COORDINATES,
+    CAM_INTF_META_JPEG_GPS_PROC_METHODS,
+    CAM_INTF_META_JPEG_GPS_TIMESTAMP, /* 160 */
+    CAM_INTF_META_JPEG_ORIENTATION,
+    CAM_INTF_META_JPEG_QUALITY,
+    CAM_INTF_META_JPEG_THUMB_QUALITY,
+    CAM_INTF_META_JPEG_THUMB_SIZE,
+
+    CAM_INTF_META_TEST_PATTERN_DATA,
+    /* DNG file support */
+    CAM_INTF_META_PROFILE_TONE_CURVE,
+    CAM_INTF_META_NEUTRAL_COL_POINT,
+
+    /* CAC */
+    CAM_INTF_PARM_CAC,
+
+    /* trigger for all modules to read the debug/log level properties */
+    CAM_INTF_PARM_UPDATE_DEBUG_LEVEL,
+
+    /* OTP : WB gr/gb */
+    CAM_INTF_META_OTP_WB_GRGB, /* 170 */
+    /* LED override for EZTUNE */
+    CAM_INTF_META_LED_MODE_OVERRIDE,
+    /* auto lens position info */
+    CAM_INTF_META_FOCUS_POSITION,
+    /* Manual exposure time */
+    CAM_INTF_PARM_EXPOSURE_TIME,
+    /* AWB meta data info */
+    CAM_INTF_META_AWB_INFO,
+    /* Manual lens position info */
+    CAM_INTF_PARM_MANUAL_FOCUS_POS,
+    /* Manual White balance gains */
+    CAM_INTF_PARM_WB_MANUAL,
+    /* IMG LIB reprocess debug section */
+    CAM_INTF_META_IMGLIB, /* cam_intf_meta_imglib_t */
+    /* FLIP mode parameter*/
+    CAM_INTF_PARM_FLIP,
+    CAM_INTF_META_USE_AV_TIMER,
+
+    CAM_INTF_META_EFFECTIVE_EXPOSURE_FACTOR,
+
+    /*Black level parameters*/
+    CAM_INTF_META_LDAF_EXIF,
+    CAM_INTF_META_BLACK_LEVEL_SOURCE_PATTERN,
+    CAM_INTF_META_BLACK_LEVEL_APPLIED_PATTERN,
+    CAM_INTF_META_CDS_DATA,
+
+    /* Whether EIS is enabled */
+    CAM_INTF_META_VIDEO_STAB_MODE,
+    /* Whether HAL has run into DRAIN error */
+    CAM_INTF_META_DAEMON_RESTART,
+    CAM_INTF_PARM_MAX
+} cam_intf_parm_type_t;
+
+typedef struct {
+    uint32_t forced;
+    union {
+      uint32_t force_linecount_value;
+      float    force_gain_value;
+      float    force_snap_exp_value;
+      float    force_exp_value;
+      uint32_t force_snap_linecount_value;
+      float    force_snap_gain_value;
+    } u;
+} cam_ez_force_params_t;
+
+typedef struct {
+    float cam_black_level[4];
+} cam_black_level_metadata_t;
+
+typedef enum {
+    CAM_EZTUNE_CMD_STATUS,
+    CAM_EZTUNE_CMD_AEC_ENABLE,
+    CAM_EZTUNE_CMD_AWB_ENABLE,
+    CAM_EZTUNE_CMD_AF_ENABLE,
+    CAM_EZTUNE_CMD_AEC_FORCE_LINECOUNT,
+    CAM_EZTUNE_CMD_AEC_FORCE_GAIN,
+    CAM_EZTUNE_CMD_AEC_FORCE_EXP,
+    CAM_EZTUNE_CMD_AEC_FORCE_SNAP_LC,
+    CAM_EZTUNE_CMD_AEC_FORCE_SNAP_GAIN,
+    CAM_EZTUNE_CMD_AEC_FORCE_SNAP_EXP,
+    CAM_EZTUNE_CMD_AWB_MODE,
+    CAM_EZTUNE_CMD_AWB_FORCE_DUAL_LED_IDX,
+} cam_eztune_cmd_type_t;
+
+typedef struct {
+  cam_eztune_cmd_type_t   cmd;
+  union {
+    int32_t running;
+    int32_t aec_enable;
+    int32_t awb_enable;
+    int32_t af_enable;
+    cam_ez_force_params_t ez_force_param;
+    int32_t awb_mode;
+    int32_t ez_force_dual_led_idx;
+  } u;
+} cam_eztune_cmd_data_t;
+
+
+/*****************************************************************************
+ *                 Code for HAL3 data types                                  *
+ ****************************************************************************/
+typedef enum {
+    CAM_INTF_METADATA_MAX
+} cam_intf_metadata_type_t;
+
+typedef enum {
+    CAM_INTENT_CUSTOM,
+    CAM_INTENT_PREVIEW,
+    CAM_INTENT_STILL_CAPTURE,
+    CAM_INTENT_VIDEO_RECORD,
+    CAM_INTENT_VIDEO_SNAPSHOT,
+    CAM_INTENT_ZERO_SHUTTER_LAG,
+    CAM_INTENT_MAX,
+} cam_intent_t;
+
+typedef enum {
+    /* Full application control of pipeline. All 3A routines are disabled,
+     * no other settings in android.control.* have any effect */
+    CAM_CONTROL_OFF,
+    /* Use settings for each individual 3A routine. Manual control of capture
+     * parameters is disabled. All controls in android.control.* besides sceneMode
+     * take effect */
+    CAM_CONTROL_AUTO,
+    /* Use specific scene mode. Enabling this disables control.aeMode,
+     * control.awbMode and control.afMode controls; the HAL must ignore those
+     * settings while USE_SCENE_MODE is active (except for FACE_PRIORITY scene mode).
+     * Other control entries are still active. This setting can only be used if
+     * availableSceneModes != UNSUPPORTED. TODO: Should we remove this and handle this
+     * in HAL ?*/
+    CAM_CONTROL_USE_SCENE_MODE,
+    CAM_CONTROL_MAX
+} cam_control_mode_t;
+
+typedef enum {
+    /* Use the android.colorCorrection.transform matrix to do color conversion */
+    CAM_COLOR_CORRECTION_TRANSFORM_MATRIX,
+    /* Must not slow down frame rate relative to raw bayer output */
+    CAM_COLOR_CORRECTION_FAST,
+    /* Frame rate may be reduced by high quality */
+    CAM_COLOR_CORRECTION_HIGH_QUALITY,
+} cam_color_correct_mode_t;
+
+typedef enum {
+    CAM_COLOR_CORRECTION_ABERRATION_OFF,
+    CAM_COLOR_CORRECTION_ABERRATION_FAST,
+    CAM_COLOR_CORRECTION_ABERRATION_HIGH_QUALITY,
+    CAM_COLOR_CORRECTION_ABERRATION_MAX
+} cam_aberration_mode_t;
+
+#define CC_MATRIX_ROWS 3
+#define CC_MATRIX_COLS 3
+
+typedef struct {
+    /* 3x3 rational matrix in row-major order; each element is in the range (0, 1) */
+    cam_rational_type_t transform_matrix[CC_MATRIX_ROWS][CC_MATRIX_COLS];
+} cam_color_correct_matrix_t;
+
+#define CAM_FOCAL_LENGTHS_MAX     1
+#define CAM_APERTURES_MAX         1
+#define CAM_FILTER_DENSITIES_MAX  1
+#define CAM_MAX_MAP_HEIGHT        6
+#define CAM_MAX_MAP_WIDTH         6
+#define CAM_MAX_SHADING_MAP_WIDTH 17
+#define CAM_MAX_SHADING_MAP_HEIGHT 13
+#define CAM_MAX_TONEMAP_CURVE_SIZE    512
+#define CAM_MAX_FLASH_BRACKETING    5
+
+typedef struct {
+    /* A 1D array of pairs of floats.
+     * Mapping a 0-1 input range to a 0-1 output range.
+     * The input range must be monotonically increasing with N,
+     * and values between entries should be linearly interpolated.
+     * For example, if the array is: [0.0, 0.0, 0.3, 0.5, 1.0, 1.0],
+     * then the input->output mapping for a few sample points would be:
+     * 0 -> 0, 0.15 -> 0.25, 0.3 -> 0.5, 0.5 -> 0.64 */
+    float tonemap_points[CAM_MAX_TONEMAP_CURVE_SIZE][2];
+} cam_tonemap_curve_t;
+
+typedef struct {
+   size_t tonemap_points_cnt;
+   cam_tonemap_curve_t curves[3];
+} cam_rgb_tonemap_curves;
+
+typedef struct {
+   size_t tonemap_points_cnt;
+   cam_tonemap_curve_t curve;
+} cam_profile_tone_curve;
+
+#define NEUTRAL_COL_POINTS 3
+
+typedef struct {
+    cam_rational_type_t neutral_col_point[NEUTRAL_COL_POINTS];
+} cam_neutral_col_point_t;
+
+typedef enum {
+    OFF,
+    FAST,
+    QUALITY,
+} cam_quality_preference_t;
+
+typedef enum {
+    CAM_FLASH_CTRL_OFF,
+    CAM_FLASH_CTRL_SINGLE,
+    CAM_FLASH_CTRL_TORCH
+} cam_flash_ctrl_t;
+
+typedef struct {
+    uint8_t frame_dropped; /* Indicates whether any stream buffer was dropped */
+    cam_stream_ID_t cam_stream_ID; /* if dropped, stream IDs of the dropped streams */
+} cam_frame_dropped_t;
+
+typedef struct {
+    uint8_t ae_mode;
+    uint8_t awb_mode;
+    uint8_t af_mode;
+} cam_scene_mode_overrides_t;
+
+typedef struct {
+    int32_t left;
+    int32_t top;
+    int32_t width;
+    int32_t height;
+} cam_crop_region_t;
+
+typedef struct {
+    /* Estimated sharpness for each region of the input image.
+     * Normalized to be between 0 and maxSharpnessMapValue.
+     * Higher values mean sharper (better focused) */
+    int32_t sharpness[CAM_MAX_MAP_WIDTH][CAM_MAX_MAP_HEIGHT];
+} cam_sharpness_map_t;
+
+typedef struct {
+   float lens_shading[4*CAM_MAX_SHADING_MAP_HEIGHT*CAM_MAX_SHADING_MAP_WIDTH];
+} cam_lens_shading_map_t;
+
+typedef struct {
+    int32_t min_value;
+    int32_t max_value;
+    int32_t def_value;
+    int32_t step;
+} cam_control_range_t;
+
+#define CAM_QCOM_FEATURE_NONE            0U
+#define CAM_QCOM_FEATURE_FACE_DETECTION (1U<<0)
+#define CAM_QCOM_FEATURE_DENOISE2D      (1U<<1)
+#define CAM_QCOM_FEATURE_CROP           (1U<<2)
+#define CAM_QCOM_FEATURE_ROTATION       (1U<<3)
+#define CAM_QCOM_FEATURE_FLIP           (1U<<4)
+#define CAM_QCOM_FEATURE_HDR            (1U<<5)
+#define CAM_QCOM_FEATURE_REGISTER_FACE  (1U<<6)
+#define CAM_QCOM_FEATURE_SHARPNESS      (1U<<7)
+#define CAM_QCOM_FEATURE_VIDEO_HDR      (1U<<8)
+#define CAM_QCOM_FEATURE_CAC            (1U<<9)
+#define CAM_QCOM_FEATURE_SCALE          (1U<<10)
+#define CAM_QCOM_FEATURE_EFFECT         (1U<<11)
+#define CAM_QCOM_FEATURE_UBIFOCUS       (1U<<12)
+#define CAM_QCOM_FEATURE_CHROMA_FLASH   (1U<<13)
+#define CAM_QCOM_FEATURE_OPTIZOOM       (1U<<14)
+#define CAM_QCOM_FEATURE_SENSOR_HDR     (1U<<15)
+#define CAM_QCOM_FEATURE_REFOCUS        (1U<<16)
+#define CAM_QCOM_FEATURE_CPP_TNR        (1U<<17)
+#define CAM_QCOM_FEATURE_RAW_PROCESSING (1U<<18)
+#define CAM_QCOM_FEATURE_TRUEPORTRAIT   (1U<<19)
+#define CAM_QCOM_FEATURE_LLVD           (1U<<20)
+#define CAM_QCOM_FEATURE_DIS20          (1U<<21)
+#define CAM_QCOM_FEATURE_STILLMORE      (1U<<22)
+#define CAM_QCOM_FEATURE_CDS            (1U<<23)
+#define CAM_QCOM_FEATURE_MAX            (1U<<24)
+#define CAM_QCOM_FEATURE_PP_SUPERSET    (CAM_QCOM_FEATURE_DENOISE2D|CAM_QCOM_FEATURE_CROP|\
+                                         CAM_QCOM_FEATURE_ROTATION|CAM_QCOM_FEATURE_SHARPNESS|\
+                                         CAM_QCOM_FEATURE_SCALE|CAM_QCOM_FEATURE_CAC)
+
+#define CAM_QCOM_FEATURE_PP_PASS_1      CAM_QCOM_FEATURE_PP_SUPERSET
+#define CAM_QCOM_FEATURE_PP_PASS_2      (CAM_QCOM_FEATURE_SCALE | CAM_QCOM_FEATURE_CROP)
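+
+/* Feature bits are combined with bitwise OR; for example (illustrative):
+ *   uint32_t pp_mask = CAM_QCOM_FEATURE_DENOISE2D | CAM_QCOM_FEATURE_SHARPNESS;
+ *   if (pp_mask & CAM_QCOM_FEATURE_CAC) { ... CAC was requested ... }
+ */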
+
+
+// Counter-clockwise
+typedef enum {
+    ROTATE_0 = 1<<0,
+    ROTATE_90 = 1<<1,
+    ROTATE_180 = 1<<2,
+    ROTATE_270 = 1<<3,
+} cam_rotation_t;
+
+typedef struct {
+   cam_rotation_t rotation;         /* jpeg rotation */
+   cam_rotation_t device_rotation;  /* device rotation */
+   uint32_t streamId;
+} cam_rotation_info_t;
+
+typedef enum {
+    FLIP_NONE = 0, /* 00b */
+    FLIP_H = 1,    /* 01b */
+    FLIP_V = 2,    /* 10b */
+    FLIP_V_H = 3,  /* 11b */
+} cam_flip_t;
+
+typedef struct {
+    uint32_t bundle_id;                            /* bundle id */
+    uint8_t num_of_streams;                        /* number of streams in the bundle */
+    uint32_t stream_ids[MAX_STREAM_NUM_IN_BUNDLE]; /* array of stream ids to be bundled */
+} cam_bundle_config_t;
+
+typedef enum {
+    CAM_ONLINE_REPROCESS_TYPE,    /* online reprocess, frames from running streams */
+    CAM_OFFLINE_REPROCESS_TYPE,   /* offline reprocess, frames from external source */
+} cam_reprocess_type_enum_t;
+
+typedef struct {
+    uint8_t burst_count;
+    uint8_t min_burst_count;
+    uint8_t max_burst_count;
+} cam_still_more_t;
+
+typedef struct {
+    uint8_t burst_count;
+    uint8_t output_count;
+    uint8_t flash_bracketing[CAM_MAX_FLASH_BRACKETING];
+    uint8_t metadata_index;
+} cam_chroma_flash_t;
+
+typedef enum {
+    CAM_HDR_MODE_SINGLEFRAME,    /* Single frame HDR mode which does only tone mapping */
+    CAM_HDR_MODE_MULTIFRAME,     /* Multi frame HDR mode which needs two frames with 0.5x and 2x exposure respectively */
+} cam_hdr_mode_enum_t;
+
+typedef struct {
+    uint32_t hdr_enable;
+    uint32_t hdr_need_1x; /* when CAM_QCOM_FEATURE_HDR enabled, indicate if 1x is needed for output */
+    cam_hdr_mode_enum_t hdr_mode;
+} cam_hdr_param_t;
+
+typedef struct {
+    int32_t output_width;
+    int32_t output_height;
+} cam_scale_param_t;
+
+typedef struct {
+    uint8_t enable;
+    uint8_t burst_count;
+    uint8_t focus_steps[MAX_AF_BRACKETING_VALUES];
+    uint8_t output_count;
+    uint32_t meta_max_size;
+} cam_af_bracketing_t;
+
+typedef struct {
+    uint8_t enable;
+    uint8_t burst_count;
+} cam_flash_bracketing_t;
+
+typedef struct {
+    uint8_t enable;
+    uint8_t burst_count;
+    uint8_t zoom_threshold;
+} cam_opti_zoom_t;
+
+typedef struct {
+    size_t meta_max_size;
+} cam_true_portrait_t;
+
+typedef enum {
+    CAM_FLASH_OFF,
+    CAM_FLASH_ON
+} cam_flash_value_t;
+
+typedef struct {
+    cam_sensor_t sens_type;
+    cam_format_t native_format;
+} cam_sensor_type_t;
+
+typedef struct {
+    uint32_t result;
+    uint32_t header_size;
+    uint32_t width;
+    uint32_t height;
+    uint8_t data[0];
+} cam_misc_buf_t;
+
+typedef struct {
+    uint32_t misc_buffer_index;
+} cam_misc_buf_param_t;
+
+typedef struct {
+    /* reprocess feature mask */
+    uint32_t feature_mask;
+
+    /* individual setting for features to be reprocessed */
+    cam_denoise_param_t denoise2d;
+    cam_rect_t input_crop;
+    cam_rotation_t rotation;
+    uint32_t flip;
+    int32_t sharpness;
+    int32_t effect;
+    cam_hdr_param_t hdr_param;
+    cam_scale_param_t scale_param;
+
+    uint8_t zoom_level;
+    cam_flash_value_t flash_value;
+    cam_misc_buf_param_t misc_buf_param;
+    uint32_t burst_cnt;
+} cam_pp_feature_config_t;
+
+typedef struct {
+    uint32_t input_stream_id;
+    /* input source stream type */
+    cam_stream_type_t input_stream_type;
+} cam_pp_online_src_config_t;
+
+typedef struct {
+    /* image format */
+    cam_format_t input_fmt;
+
+    /* image dimension */
+    cam_dimension_t input_dim;
+
+    /* buffer plane information, will be calc based on stream_type, fmt,
+       dim, and padding_info(from stream config). Info including:
+       offset_x, offset_y, stride, scanline, plane offset */
+    cam_stream_buf_plane_info_t input_buf_planes;
+
+    /* number of input reprocess buffers */
+    uint8_t num_of_bufs;
+
+    /* input source type */
+    cam_stream_type_t input_type;
+
+} cam_pp_offline_src_config_t;
+
+/* reprocess stream input configuration */
+typedef struct {
+    /* input source config */
+    cam_reprocess_type_enum_t pp_type;
+    union {
+        cam_pp_online_src_config_t online;
+        cam_pp_offline_src_config_t offline;
+    };
+
+    /* pp feature config */
+    cam_pp_feature_config_t pp_feature_config;
+} cam_stream_reproc_config_t;
+
+typedef struct {
+    uint8_t crop_enabled;
+    cam_rect_t input_crop;
+} cam_crop_param_t;
+
+typedef struct {
+    uint8_t trigger;
+    int32_t trigger_id;
+} cam_trigger_t;
+
+typedef struct {
+    cam_denoise_param_t denoise2d;
+    cam_crop_param_t crop;
+    uint32_t flip;     /* 0 means no flip */
+    uint32_t uv_upsample; /* 0 means no chroma upsampling */
+    int32_t sharpness; /* 0 means no sharpness */
+    int32_t effect;
+    cam_rotation_t rotation;
+    cam_rotation_t device_rotation;
+} cam_per_frame_pp_config_t;
+
+typedef enum {
+    CAM_OPT_STAB_OFF,
+    CAM_OPT_STAB_ON,
+    CAM_OPT_STAB_MAX
+} cam_optical_stab_modes_t;
+
+typedef enum {
+    CAM_FILTER_ARRANGEMENT_RGGB,
+    CAM_FILTER_ARRANGEMENT_GRBG,
+    CAM_FILTER_ARRANGEMENT_GBRG,
+    CAM_FILTER_ARRANGEMENT_BGGR,
+
+    /* Sensor is not Bayer; output has 3 16-bit values for each pixel,
+     * instead of just 1 16-bit value per pixel.*/
+    CAM_FILTER_ARRANGEMENT_RGB,
+    /* Sensor is YUV; SW does not have access to the actual RAW data,
+     * output is interleaved UYVY */
+    CAM_FILTER_ARRANGEMENT_UYVY,
+    CAM_FILTER_ARRANGEMENT_YUYV,
+} cam_color_filter_arrangement_t;
+
+typedef enum {
+    CAM_AF_STATE_INACTIVE,
+    CAM_AF_STATE_PASSIVE_SCAN,
+    CAM_AF_STATE_PASSIVE_FOCUSED,
+    CAM_AF_STATE_ACTIVE_SCAN,
+    CAM_AF_STATE_FOCUSED_LOCKED,
+    CAM_AF_STATE_NOT_FOCUSED_LOCKED,
+    CAM_AF_STATE_PASSIVE_UNFOCUSED
+} cam_af_state_t;
+
+typedef enum {
+  CAM_AF_LENS_STATE_STATIONARY,
+  CAM_AF_LENS_STATE_MOVING,
+} cam_af_lens_state_t;
+
+typedef enum {
+    CAM_AWB_STATE_INACTIVE,
+    CAM_AWB_STATE_SEARCHING,
+    CAM_AWB_STATE_CONVERGED,
+    CAM_AWB_STATE_LOCKED
+} cam_awb_state_t;
+
+typedef enum {
+    CAM_FOCUS_UNCALIBRATED,
+    CAM_FOCUS_APPROXIMATE,
+    CAM_FOCUS_CALIBRATED
+} cam_focus_calibration_t;
+
+typedef enum {
+    CAM_TEST_PATTERN_OFF,
+    CAM_TEST_PATTERN_SOLID_COLOR,
+    CAM_TEST_PATTERN_COLOR_BARS,
+    CAM_TEST_PATTERN_COLOR_BARS_FADE_TO_GRAY,
+    CAM_TEST_PATTERN_PN9,
+} cam_test_pattern_mode_t;
+
+typedef struct {
+    cam_test_pattern_mode_t mode;
+    int32_t r;
+    int32_t gr;
+    int32_t gb;
+    int32_t b;
+} cam_test_pattern_data_t;
+
+typedef enum {
+    CAM_AWB_D50,
+    CAM_AWB_D65,
+    CAM_AWB_D75,
+    CAM_AWB_A,
+    CAM_AWB_CUSTOM_A,
+    CAM_AWB_WARM_FLO,
+    CAM_AWB_COLD_FLO,
+    CAM_AWB_CUSTOM_FLO,
+    CAM_AWB_NOON,
+    CAM_AWB_CUSTOM_DAYLIGHT,
+    CAM_AWB_INVALID_ALL_LIGHT,
+} cam_illuminat_t;
+
+typedef enum {
+    LEGACY_RAW,
+    MIPI_RAW,
+} cam_opaque_raw_format_t;
+
+typedef enum {
+    CAM_PERF_NORMAL = 0,
+    CAM_PERF_HIGH_PERFORMANCE,
+} cam_perf_mode_t;
+
+typedef struct {
+    float real_gain;
+    float lux_idx;
+    float exp_time;
+} cam_intf_aec_t;
+
+typedef struct {
+    uint32_t frame_count;
+    cam_intf_aec_t aec_data[CAM_INTF_AEC_DATA_MAX];
+} cam_intf_meta_imglib_input_aec_t;
+
+typedef struct {
+    cam_intf_meta_imglib_input_aec_t meta_imglib_input_aec;
+} cam_intf_meta_imglib_t;
+
+#endif /* __QCAMERA_TYPES_H__ */
diff --git a/camera/QCamera2/stack/common/mm_camera_interface.h b/camera/QCamera2/stack/common/mm_camera_interface.h
new file mode 100644
index 0000000..e203974
--- /dev/null
+++ b/camera/QCamera2/stack/common/mm_camera_interface.h
@@ -0,0 +1,813 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MM_CAMERA_INTERFACE_H__
+#define __MM_CAMERA_INTERFACE_H__
+#include <linux/msm_ion.h>
+#include <linux/videodev2.h>
+#include <media/msmb_camera.h>
+#include "cam_intf.h"
+#include "cam_queue.h"
+
+#define MM_CAMERA_MAX_NUM_SENSORS MSM_MAX_CAMERA_SENSORS
+#define MM_CAMERA_MAX_NUM_FRAMES CAM_MAX_NUM_BUFS_PER_STREAM
+/* num of channels allowed in a camera obj */
+#define MM_CAMERA_CHANNEL_MAX 16
+
+#define PAD_TO_SIZE(size, padding) \
+        ((size + (typeof(size))(padding - 1)) & \
+        (typeof(size))(~(padding - 1)))
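+
+/* Illustrative note: PAD_TO_SIZE rounds size up to the next multiple of
+ * padding, assuming padding is a power of two (the bitmask above relies on
+ * that). For example:
+ *   PAD_TO_SIZE(100,  64) == 128
+ *   PAD_TO_SIZE(1080, 32) == 1088
+ */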
+
+/** CAM_DUMP_TO_FILE:
+ *  @path: directory path for the dump file
+ *  @name: file name
+ *  @index: index of the file
+ *  @extn: file extension
+ *  @p_addr: address of the buffer
+ *  @len: buffer length
+ *
+ *  dump the image to the file
+ **/
+#define CAM_DUMP_TO_FILE(path, name, index, extn, p_addr, len) ({ \
+  size_t rc = 0; \
+  char filename[FILENAME_MAX]; \
+  if (index >= 0) \
+    snprintf(filename, FILENAME_MAX, "%s/%s%d.%s", path, name, index, extn); \
+  else \
+    snprintf(filename, FILENAME_MAX, "%s/%s.%s", path, name, extn); \
+  FILE *fp = fopen(filename, "w+"); \
+  if (fp) { \
+    rc = fwrite(p_addr, 1, len, fp); \
+    ALOGE("%s:%d] written size %d", __func__, __LINE__, len); \
+    fclose(fp); \
+  } else { \
+    ALOGE("%s:%d] open %s failed", __func__, __LINE__, filename); \
+  } \
+})
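+
+/* Usage sketch (illustrative values only; "frame" stands for any
+ * mm_camera_buf_def_t held by the caller):
+ *   CAM_DUMP_TO_FILE("/data/misc/camera", "preview", (int)frame->frame_idx,
+ *                    "yuv", frame->buffer, frame->frame_len);
+ * writes the buffer contents to /data/misc/camera/preview<idx>.yuv. */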
+
+/* Declaring Buffer structure */
+struct mm_camera_buf_def;
+
+/** mm_camera_plane_def_t : structure for frame plane info
+*    @num_planes : num of planes for the frame buffer, to be
+*               filled during mem allocation
+*    @planes : plane info for the frame buffer, to be filled
+*               during mem allocation
+**/
+typedef struct {
+    int8_t num_planes;
+    struct v4l2_plane planes[VIDEO_MAX_PLANES];
+} mm_camera_plane_buf_def_t;
+
+/** mm_camera_user_buf_def_t : structure for user buffer info
+*    @num_buffers : num of buffers in this user defined structure
+*    @bufs_used : actual number of buffer filled
+*    @buf_in_use : flag to notify buffer usage status.
+*    @plane_buf : Plane buffer array pointer.
+**/
+typedef struct {
+    uint8_t num_buffers;
+    uint8_t bufs_used;     /* Number of buffers filled by kernel */
+    uint8_t buf_in_use;  /* Flag indicating container buffer usage status */
+    int32_t buf_idx[MSM_CAMERA_MAX_USER_BUFF_CNT];
+    struct mm_camera_buf_def *plane_buf;
+} mm_camera_user_buf_def_t;
+
+/** mm_camera_buf_def_t: structure for stream frame buf
+*    @stream_id : stream handler to uniquely identify a stream
+*               object
+*    @buf_idx : index of the buf within the stream bufs, to be
+*               filled during mem allocation
+*    @timespec_ts : time stamp, to be filled when DQBUF is
+*                 called
+*    @frame_idx : frame sequence num, to be filled when DQBUF
+*    @plane_buf  : Frame plane definition
+*    @fd : file descriptor of the frame buffer, to be filled
+*        during mem allocation
+*    @buffer : pointer to the frame buffer, to be filled during
+*            mem allocation
+*    @frame_len : length of the whole frame, to be filled during
+*               mem allocation
+*    @mem_info : user specific pointer to additional mem info
+*    @flags:  v4l2_buffer flags, used to report error in data buffers
+**/
+typedef struct mm_camera_buf_def {
+    uint32_t stream_id;
+    cam_stream_type_t stream_type;
+    cam_stream_buf_type buf_type;
+    uint32_t buf_idx;
+    uint8_t is_uv_subsampled;
+    struct timespec ts;
+    uint32_t frame_idx;
+    union {
+        mm_camera_plane_buf_def_t planes_buf;
+        mm_camera_user_buf_def_t user_buf;
+    };
+    int fd;
+    void *buffer;
+    size_t frame_len;
+    void *mem_info;
+    uint32_t flags;
+} mm_camera_buf_def_t;
+
+/** mm_camera_super_buf_t: super buf structure for bundled
+*   stream frames
+*    @camera_handle : camera handler to uniquely identify
+*              a camera object
+*    @ch_id : channel handler to uniquely identify a channel
+*           object
+*    @num_bufs : number of buffers in the super buf, must not
+*              exceed MAX_STREAM_NUM_IN_BUNDLE
+*    @bufs : array of buffers in the bundle
+**/
+typedef struct {
+    uint32_t camera_handle;
+    uint32_t ch_id;
+    uint32_t num_bufs;
+    uint8_t bUnlockAEC;
+    uint8_t bReadyForPrepareSnapshot;
+    mm_camera_buf_def_t* bufs[MAX_STREAM_NUM_IN_BUNDLE];
+} mm_camera_super_buf_t;
+
+/** mm_camera_event_t: structure for event
+*    @server_event_type : event type from server
+*    @status : status of an event, value could be
+*              CAM_STATUS_SUCCESS
+*              CAM_STATUS_FAILED
+**/
+typedef struct {
+    cam_event_type_t server_event_type;
+    uint32_t status;
+} mm_camera_event_t;
+
+/** mm_camera_event_notify_t: function definition for event
+*   notify handling
+*    @camera_handle : camera handler
+*    @evt : pointer to an event struct
+*    @user_data: user data pointer
+**/
+typedef void (*mm_camera_event_notify_t)(uint32_t camera_handle,
+                                         mm_camera_event_t *evt,
+                                         void *user_data);
+
+/** mm_camera_buf_notify_t: function definition for frame notify
+*   handling
+*    @mm_camera_super_buf_t : received frame buffers
+*    @user_data: user data pointer
+**/
+typedef void (*mm_camera_buf_notify_t) (mm_camera_super_buf_t *bufs,
+                                        void *user_data);
+
+/** map_stream_buf_op_t: function definition for operation of
+*   mapping stream buffers via domain socket
+*    @frame_idx : buffer index within stream buffers
+*    @plane_idx    : plane index. If all planes share the same
+*                   fd, plane_idx = -1; otherwise, plane_idx is
+*                   the index of the plane (0..num_of_planes)
+*    @fd : file descriptor of the stream buffer
+*    @size: size of the stream buffer
+*    @userdata : user data pointer
+**/
+typedef int32_t (*map_stream_buf_op_t) (uint32_t frame_idx,
+                                        int32_t plane_idx,
+                                        int fd,
+                                        size_t size,
+                                        cam_mapping_buf_type type,
+                                        void *userdata);
+
+/** unmap_stream_buf_op_t: function definition for operation of
+*                          unmapping stream buffers via domain
+*                          socket
+*    @frame_idx : buffer index within stream buffers
+*    @plane_idx : plane index. If all planes share the same
+*                 fd, plane_idx = -1; otherwise, plane_idx is
+*                 the index of the plane (0..num_of_planes)
+*    @userdata : user data pointer
+**/
+typedef int32_t (*unmap_stream_buf_op_t) (uint32_t frame_idx,
+                                          int32_t plane_idx,
+                                          cam_mapping_buf_type type,
+                                          void *userdata);
+
+/** mm_camera_map_unmap_ops_tbl_t: virtual table
+*                      for mapping/unmapping stream buffers via
+*                      domain socket
+*    @map_ops : operation for mapping
+*    @unmap_ops : operation for unmapping
+*    @userdata: user data pointer
+**/
+typedef struct {
+    map_stream_buf_op_t map_ops;
+    unmap_stream_buf_op_t unmap_ops;
+    void *userdata;
+} mm_camera_map_unmap_ops_tbl_t;
+
+/** mm_camera_stream_mem_vtbl_t: virtual table for stream
+*                      memory allocation and deallocation
+*    @get_bufs : function definition for allocating
+*                stream buffers
+*    @put_bufs : function definition for deallocating
+*                stream buffers
+*    @user_data: user data pointer
+**/
+typedef struct {
+  void *user_data;
+  int32_t (*get_bufs) (cam_frame_len_offset_t *offset,
+                       uint8_t *num_bufs,
+                       uint8_t **initial_reg_flag,
+                       mm_camera_buf_def_t **bufs,
+                       mm_camera_map_unmap_ops_tbl_t *ops_tbl,
+                       void *user_data);
+  int32_t (*put_bufs) (mm_camera_map_unmap_ops_tbl_t *ops_tbl,
+                       void *user_data);
+  int32_t (*invalidate_buf)(uint32_t index, void *user_data);
+  int32_t (*clean_invalidate_buf)(uint32_t index, void *user_data);
+} mm_camera_stream_mem_vtbl_t;
+
+/** mm_camera_stream_config_t: structure for stream
+*                              configuration
+*    @stream_info : pointer to a stream info structure
+*    @padding_info: padding info obtained from query_capability
+*    @mem_tbl : memory operation table for
+*              allocating/deallocating stream buffers
+*    @stream_cb : callback handling stream frame notify
+*    @userdata : user data pointer
+**/
+typedef struct {
+    cam_stream_info_t *stream_info;
+    cam_padding_info_t padding_info;
+    mm_camera_stream_mem_vtbl_t mem_vtbl;
+    mm_camera_buf_notify_t stream_cb;
+    void *userdata;
+} mm_camera_stream_config_t;
+
+/** mm_camera_super_buf_notify_mode_t: enum for super buffer
+*                                      notification mode
+*    @MM_CAMERA_SUPER_BUF_NOTIFY_BURST :
+*       ZSL use case: get burst of frames
+*    @MM_CAMERA_SUPER_BUF_NOTIFY_CONTINUOUS :
+*       get continuous frames: when the super buf is ready
+*       dispatch it to HAL
+**/
+typedef enum {
+    MM_CAMERA_SUPER_BUF_NOTIFY_BURST = 0,
+    MM_CAMERA_SUPER_BUF_NOTIFY_CONTINUOUS,
+    MM_CAMERA_SUPER_BUF_NOTIFY_MAX
+} mm_camera_super_buf_notify_mode_t;
+
+/** mm_camera_super_buf_priority_t: enum for super buffer
+*                                   matching priority
+*    @MM_CAMERA_SUPER_BUF_PRIORITY_NORMAL :
+*       Save the frame regardless of whether it is focused. Currently only
+*       this type is supported.
+*    @MM_CAMERA_SUPER_BUF_PRIORITY_FOCUS :
+*       only queue the frame that is focused. Will enable meta
+*       data header to carry focus info
+*    @MM_CAMERA_SUPER_BUF_PRIORITY_EXPOSURE_BRACKETING :
+*       after shutter, only queue matched exposure index
+**/
+typedef enum {
+    MM_CAMERA_SUPER_BUF_PRIORITY_NORMAL = 0,
+    MM_CAMERA_SUPER_BUF_PRIORITY_FOCUS,
+    MM_CAMERA_SUPER_BUF_PRIORITY_EXPOSURE_BRACKETING,
+    MM_CAMERA_SUPER_BUF_PRIORITY_LOW,/* Bundled metadata frame may not match*/
+    MM_CAMERA_SUPER_BUF_PRIORITY_MAX
+} mm_camera_super_buf_priority_t;
+
+/** mm_camera_advanced_capture_t: enum for advanced capture type.
+*    @MM_CAMERA_AF_BRACKETING :
+*       to enable AF Bracketing.
+*    @MM_CAMERA_AE_BRACKETING :
+*       to enable AE Bracketing.
+*    @MM_CAMERA_FLASH_BRACKETING :
+*       to enable Flash Bracketing.
+*    @MM_CAMERA_ZOOM_1X :
+*       to enable zoom 1x capture request
+**/
+typedef enum {
+   MM_CAMERA_AF_BRACKETING = 0,
+   MM_CAMERA_AE_BRACKETING,
+   MM_CAMERA_FLASH_BRACKETING,
+   MM_CAMERA_ZOOM_1X,
+   MM_CAMERA_FRAME_CAPTURE,
+} mm_camera_advanced_capture_t;
+
+/** mm_camera_channel_attr_t: structure for defining channel
+*                             attributes
+*    @notify_mode : notify mode: burst or continuous
+*    @water_mark : queue depth. Only valid for burst mode
+*    @look_back : look back how many frames from last buf.
+*                 Only valid for burst mode
+*    @post_frame_skip : after sending the first frame to HAL, how many
+*                     frames need to be skipped before the next
+*                     delivery. Only valid for burst mode
+*    @max_unmatched_frames : max number of unmatched frames in
+*                     queue
+*    @priority : save matched priority frames only
+**/
+typedef struct {
+    mm_camera_super_buf_notify_mode_t notify_mode;
+    uint8_t water_mark;
+    uint8_t look_back;
+    uint8_t post_frame_skip;
+    uint8_t max_unmatched_frames;
+    mm_camera_super_buf_priority_t priority;
+} mm_camera_channel_attr_t;
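+
+/* Example attribute setup (illustrative values only, not mandated by the
+ * interface):
+ *
+ *   mm_camera_channel_attr_t attr = {
+ *       .notify_mode = MM_CAMERA_SUPER_BUF_NOTIFY_BURST,
+ *       .water_mark = 2,
+ *       .look_back = 2,
+ *       .post_frame_skip = 1,
+ *       .max_unmatched_frames = 3,
+ *       .priority = MM_CAMERA_SUPER_BUF_PRIORITY_NORMAL,
+ *   };
+ */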
+
+typedef struct {
+    /** query_capability: function definition for querying static
+     *                    camera capabilities
+     *    @camera_handle : camera handler
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     *  Note: would assume cam_capability_t is already mapped
+     **/
+    int32_t (*query_capability) (uint32_t camera_handle);
+
+    /** register_event_notify: function definition for registering
+     *                         for event notification
+     *    @camera_handle : camera handler
+     *    @evt_cb : callback for event notify
+     *    @user_data : user data pointer
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*register_event_notify) (uint32_t camera_handle,
+                                      mm_camera_event_notify_t evt_cb,
+                                      void *user_data);
+
+    /** close_camera: function definition for closing a camera
+     *    @camera_handle : camera handler
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*close_camera) (uint32_t camera_handle);
+
+
+    /** error_close_camera: function definition for closing
+     *                      the camera backend on an unrecoverable
+     *                      error
+     *    @camera_handle : camera handler
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*error_close_camera) (uint32_t camera_handle);
+
+    /** map_buf: function definition for mapping a camera buffer
+     *           via domain socket
+     *    @camera_handle : camera handler
+     *    @buf_type : type of mapping buffers, can be value of
+     *                CAM_MAPPING_BUF_TYPE_CAPABILITY
+     *                CAM_MAPPING_BUF_TYPE_SETPARM_BUF
+     *                CAM_MAPPING_BUF_TYPE_GETPARM_BUF
+     *    @fd : file descriptor of the stream buffer
+     *    @size :  size of the stream buffer
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*map_buf) (uint32_t camera_handle,
+                        uint8_t buf_type,
+                        int fd,
+                        size_t size);
+
+    /** unmap_buf: function definition for unmapping a camera buffer
+     *           via domain socket
+     *    @camera_handle : camera handler
+     *    @buf_type : type of mapping buffers, can be value of
+     *                CAM_MAPPING_BUF_TYPE_CAPABILITY
+     *                CAM_MAPPING_BUF_TYPE_SETPARM_BUF
+     *                CAM_MAPPING_BUF_TYPE_GETPARM_BUF
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*unmap_buf) (uint32_t camera_handle,
+                          uint8_t buf_type);
+
+    /** set_parms: function definition for setting camera
+     *             based parameters to server
+     *    @camera_handle : camera handler
+     *    @parms : batch for parameters to be set, stored in
+     *               parm_buffer_t
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     *  Note: would assume parm_buffer_t is already mapped, and the
+     *       corresponding parameter entries to be set are filled in the
+     *       buf before this call
+     **/
+    int32_t (*set_parms) (uint32_t camera_handle,
+                          parm_buffer_t *parms);
+
+    /** get_parms: function definition for querying camera
+     *             based parameters from server
+     *    @camera_handle : camera handler
+     *    @parms : batch for parameters to be queried, stored in
+     *               parm_buffer_t
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     *  Note: would assume parm_buffer_t is already mapped, and the
+     *       corresponding parameter entries to be queried are filled in
+     *       the buf before this call
+     **/
+    int32_t (*get_parms) (uint32_t camera_handle,
+                          parm_buffer_t *parms);
+
+    /** do_auto_focus: function definition for performing auto focus
+     *    @camera_handle : camera handler
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     *  Note: if this call succeeds, we will always assume there will
+     *        be an auto_focus event following up.
+     **/
+    int32_t (*do_auto_focus) (uint32_t camera_handle);
+
+    /** cancel_auto_focus: function definition for cancelling
+     *                     previous auto focus request
+     *    @camera_handle : camera handler
+    *  Return value: 0 -- success
+    *                -1 -- failure
+     **/
+    int32_t (*cancel_auto_focus) (uint32_t camera_handle);
+
+    /** prepare_snapshot: function definition for preparing hardware
+     *                    for snapshot.
+     *    @camera_handle : camera handler
+     *    @do_af_flag    : flag indicating if AF needs to be done
+     *                     0 -- no AF needed
+     *                     1 -- AF needed
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*prepare_snapshot) (uint32_t camera_handle,
+                                 int32_t do_af_flag);
+
+    /** start_zsl_snapshot: function definition for starting
+     *                    zsl snapshot.
+     *    @camera_handle : camera handler
+     *    @ch_id         : channel id
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*start_zsl_snapshot) (uint32_t camera_handle, uint32_t ch_id);
+
+    /** stop_zsl_snapshot: function definition for stopping
+     *                    zsl snapshot.
+     *    @camera_handle : camera handler
+     *    @ch_id         : channel id
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*stop_zsl_snapshot) (uint32_t camera_handle, uint32_t ch_id);
+
+    /** add_channel: function definition for adding a channel
+     *    @camera_handle : camera handler
+     *    @ch_id : channel handler
+     *    @attr : pointer to channel attribute structure
+     *    @channel_cb : callback to handle bundled super buffer
+     *    @userdata : user data pointer
+     *  Return value: channel id, zero is invalid ch_id
+     * Note: attr, channel_cb, and userdata can be NULL if no
+     *       superbufCB is needed
+     **/
+    uint32_t (*add_channel) (uint32_t camera_handle,
+                             mm_camera_channel_attr_t *attr,
+                             mm_camera_buf_notify_t channel_cb,
+                             void *userdata);
+
+    /** delete_channel: function definition for deleting a channel
+     *    @camera_handle : camera handler
+     *    @ch_id : channel handler
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*delete_channel) (uint32_t camera_handle,
+                               uint32_t ch_id);
+
+    /** get_bundle_info: function definition for querying bundle
+     *  info of the channel
+     *    @camera_handle : camera handler
+     *    @ch_id         : channel handler
+     *    @bundle_info   : bundle info to be filled in
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*get_bundle_info) (uint32_t camera_handle,
+                                uint32_t ch_id,
+                                cam_bundle_config_t *bundle_info);
+
+    /** add_stream: function definition for adding a stream
+     *    @camera_handle : camera handler
+     *    @ch_id : channel handler
+     *  Return value: stream_id. zero is invalid stream_id
+     **/
+    uint32_t (*add_stream) (uint32_t camera_handle,
+                            uint32_t ch_id);
+
+    /** delete_stream: function definition for deleting a stream
+     *    @camera_handle : camera handler
+     *    @ch_id : channel handler
+     *    @stream_id : stream handler
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*delete_stream) (uint32_t camera_handle,
+                              uint32_t ch_id,
+                              uint32_t stream_id);
+
+    /** link_stream: function definition for linking a stream
+     *    @camera_handle : camera handle
+     *    @ch_id : channel handle from which the stream originates
+     *    @stream_id : stream handle
+     *    @linked_ch_id: channel handle in which the stream will be linked
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*link_stream) (uint32_t camera_handle,
+          uint32_t ch_id,
+          uint32_t stream_id,
+          uint32_t linked_ch_id);
+
+    /** config_stream: function definition for configuring a stream
+     *    @camera_handle : camera handler
+     *    @ch_id : channel handler
+     *    @stream_id : stream handler
+     *    @config : pointer to a stream configuration structure
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*config_stream) (uint32_t camera_handle,
+                              uint32_t ch_id,
+                              uint32_t stream_id,
+                              mm_camera_stream_config_t *config);
+
+    /** map_stream_buf: function definition for mapping
+     *                 stream buffer via domain socket
+     *    @camera_handle : camera handler
+     *    @ch_id : channel handler
+     *    @stream_id : stream handler
+     *    @buf_type : type of mapping buffers, can be value of
+     *             CAM_MAPPING_BUF_TYPE_STREAM_BUF
+     *             CAM_MAPPING_BUF_TYPE_STREAM_INFO
+     *             CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF
+     *    @buf_idx : buffer index within the stream buffers
+     *    @plane_idx : plane index. If all planes share the same fd,
+     *               plane_idx = -1; otherwise, plane_idx is the
+     *               index of the plane (0..num_of_planes)
+     *    @fd : file descriptor of the stream buffer
+     *    @size :  size of the stream buffer
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*map_stream_buf) (uint32_t camera_handle,
+                               uint32_t ch_id,
+                               uint32_t stream_id,
+                               uint8_t buf_type,
+                               uint32_t buf_idx,
+                               int32_t plane_idx,
+                               int fd,
+                               size_t size);
+
+    /** unmap_stream_buf: function definition for unmapping
+     *                 stream buffer via domain socket
+     *    @camera_handle : camera handler
+     *    @ch_id : channel handler
+     *    @stream_id : stream handler
+     *    @buf_type : type of mapping buffers, can be value of
+     *             CAM_MAPPING_BUF_TYPE_STREAM_BUF
+     *             CAM_MAPPING_BUF_TYPE_STREAM_INFO
+     *             CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF
+     *    @buf_idx : buffer index within the stream buffers
+     *    @plane_idx : plane index. If all planes share the same fd,
+     *               plane_idx = -1; otherwise, plane_idx is the
+     *               index of the plane (0..num_of_planes)
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*unmap_stream_buf) (uint32_t camera_handle,
+                                 uint32_t ch_id,
+                                 uint32_t stream_id,
+                                 uint8_t buf_type,
+                                 uint32_t buf_idx,
+                                 int32_t plane_idx);
+
+    /** set_stream_parms: function definition for setting stream
+     *                    specific parameters to server
+     *    @camera_handle : camera handler
+     *    @ch_id : channel handler
+     *    @stream_id : stream handler
+     *    @parms : batch for parameters to be set
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     *  Note: would assume parm buffer is already mapped, and the
+     *       corresponding parameter entries to be set are filled in the
+     *       buf before this call
+     **/
+    int32_t (*set_stream_parms) (uint32_t camera_handle,
+                                 uint32_t ch_id,
+                                 uint32_t s_id,
+                                 cam_stream_parm_buffer_t *parms);
+
+    /** get_stream_parms: function definition for querying stream
+     *                    specific parameters from server
+     *    @camera_handle : camera handler
+     *    @ch_id : channel handler
+     *    @stream_id : stream handler
+     *    @parms : batch for parameters to be queried
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     *  Note: would assume parm buffer is already mapped, and the
+     *       corresponding parameter entries to be queried are filled in
+     *       the buf before this call
+     **/
+    int32_t (*get_stream_parms) (uint32_t camera_handle,
+                                 uint32_t ch_id,
+                                 uint32_t s_id,
+                                 cam_stream_parm_buffer_t *parms);
+
+    /** start_channel: function definition for starting a channel
+     *    @camera_handle : camera handler
+     *    @ch_id : channel handler
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     * This call will start all streams belonging to the channel
+     **/
+    int32_t (*start_channel) (uint32_t camera_handle,
+                              uint32_t ch_id);
+
+    /** stop_channel: function definition for stopping a channel
+     *    @camera_handle : camera handler
+     *    @ch_id : channel handler
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     * This call will stop all streams belonging to the channel
+     **/
+    int32_t (*stop_channel) (uint32_t camera_handle,
+                             uint32_t ch_id);
+
+    /** qbuf: function definition for queuing a frame buffer back to
+     *        kernel for reuse
+     *    @camera_handle : camera handler
+     *    @ch_id : channel handler
+     *    @buf : a frame buffer to be queued back to kernel
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*qbuf) (uint32_t camera_handle,
+                     uint32_t ch_id,
+                     mm_camera_buf_def_t *buf);
+
+    /** get_queued_buf_count: function definition for querying queued buf count
+     *    @camera_handle : camera handler
+     *    @ch_id : channel handler
+     *    @stream_id : stream handler
+     *  Return value: queued buf count
+     **/
+    int32_t (*get_queued_buf_count) (uint32_t camera_handle,
+            uint32_t ch_id,
+            uint32_t stream_id);
+
+    /** request_super_buf: function definition for requesting frames
+     *                     from superbuf queue in burst mode
+     *    @camera_handle : camera handler
+     *    @ch_id : channel handler
+     *    @num_buf_requested : number of super buffers requested
+     *    @num_retro_buf_requested : number of retro buffers requested
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*request_super_buf) (uint32_t camera_handle,
+                                  uint32_t ch_id,
+                                  uint32_t num_buf_requested,
+                                  uint32_t num_retro_buf_requested);
+
+    /** cancel_super_buf_request: function definition for canceling
+     *                     frames dispatched from superbuf queue in
+     *                     burst mode
+     *    @camera_handle : camera handler
+     *    @ch_id : channel handler
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*cancel_super_buf_request) (uint32_t camera_handle,
+                                         uint32_t ch_id);
+
+    /** flush_super_buf_queue: function definition for flushing out
+     *                     all frames in the superbuf queue up to frame_idx,
+     *                     even if frames with frame_idx come in later than
+     *                     this call.
+     *    @camera_handle : camera handler
+     *    @ch_id : channel handler
+     *    @frame_idx : frame index up until which all superbufs are flushed
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*flush_super_buf_queue) (uint32_t camera_handle,
+                                      uint32_t ch_id, uint32_t frame_idx);
+
+    /** configure_notify_mode: function definition for configuring the
+     *                         notification mode of channel
+     *    @camera_handle : camera handler
+     *    @ch_id : channel handler
+     *    @notify_mode : notification mode
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*configure_notify_mode) (uint32_t camera_handle,
+                                      uint32_t ch_id,
+                                      mm_camera_super_buf_notify_mode_t notify_mode);
+
+   /** process_advanced_capture: function definition for start/stop advanced capture
+     *                    for snapshot.
+     *    @camera_handle : camera handle
+     *    @ch_id : channel handler
+     *    @type :  advanced capture type.
+     *    @start_flag : flag indicating if advanced capture needs to be done
+     *                     0 -- stop advanced capture
+     *                     1 -- start advanced capture
+     *    @in_value: Input value / configuration for the capture type
+     *  Return value: 0 -- success
+     *                -1 -- failure
+     **/
+    int32_t (*process_advanced_capture) (uint32_t camera_handle,
+             uint32_t ch_id, mm_camera_advanced_capture_t type,
+             int8_t start_flag, void *in_value);
+} mm_camera_ops_t;
+
+/** mm_camera_vtbl_t: virtual table for camera operations
+*    @camera_handle : camera handler which uniquely identifies a
+*                   camera object
+*    @ops : API call table
+**/
+typedef struct {
+    uint32_t camera_handle;
+    mm_camera_ops_t *ops;
+} mm_camera_vtbl_t;
+
+/* return number of cameras */
+uint8_t get_num_of_cameras();
+
+/* return reference pointer of camera vtbl */
+int32_t camera_open(uint8_t camera_idx, mm_camera_vtbl_t **camera_obj);
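+
+/* Typical open/close flow (illustrative sketch: error handling is trimmed,
+ * local names are placeholders, and camera_open() is assumed to follow the
+ * same 0-on-success convention as the ops table):
+ *
+ *   mm_camera_vtbl_t *cam = NULL;
+ *   if (get_num_of_cameras() > 0 &&
+ *       camera_open(0, &cam) == 0 && cam != NULL) {
+ *       cam->ops->query_capability(cam->camera_handle);
+ *       uint32_t ch = cam->ops->add_channel(cam->camera_handle,
+ *                                           NULL, NULL, NULL);
+ *       // add/config/start streams on ch, then consume frames via callbacks
+ *       cam->ops->delete_channel(cam->camera_handle, ch);
+ *       cam->ops->close_camera(cam->camera_handle);
+ *   }
+ */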
+
+/* helper functions */
+int32_t mm_stream_calc_offset_preview(cam_format_t fmt,
+        cam_dimension_t *dim,
+        cam_stream_buf_plane_info_t *buf_planes);
+
+int32_t mm_stream_calc_offset_post_view(cam_format_t fmt,
+        cam_dimension_t *dim,
+        cam_stream_buf_plane_info_t *buf_planes);
+
+int32_t mm_stream_calc_offset_snapshot(cam_format_t fmt,
+        cam_dimension_t *dim,
+        cam_padding_info_t *padding,
+        cam_stream_buf_plane_info_t *buf_planes);
+
+int32_t mm_stream_calc_offset_raw(cam_format_t fmt,
+        cam_dimension_t *dim,
+        cam_padding_info_t *padding,
+        cam_stream_buf_plane_info_t *buf_planes);
+
+int32_t mm_stream_calc_offset_video(cam_dimension_t *dim,
+        cam_stream_buf_plane_info_t *buf_planes);
+
+int32_t mm_stream_calc_offset_metadata(cam_dimension_t *dim,
+        cam_padding_info_t *padding,
+        cam_stream_buf_plane_info_t *buf_planes);
+
+int32_t mm_stream_calc_offset_postproc(cam_stream_info_t *stream_info,
+        cam_padding_info_t *padding,
+        cam_stream_buf_plane_info_t *buf_planes);
+
+int32_t mm_stream_calc_offset_analysis(cam_format_t fmt,
+        cam_dimension_t *dim,
+        cam_padding_info_t *padding,
+        cam_stream_buf_plane_info_t *buf_planes);
+
+struct camera_info *get_cam_info(uint32_t camera_id);
+#endif /*__MM_CAMERA_INTERFACE_H__*/
diff --git a/camera/QCamera2/stack/common/mm_jpeg_interface.h b/camera/QCamera2/stack/common/mm_jpeg_interface.h
new file mode 100644
index 0000000..9913683
--- /dev/null
+++ b/camera/QCamera2/stack/common/mm_jpeg_interface.h
@@ -0,0 +1,321 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef MM_JPEG_INTERFACE_H_
+#define MM_JPEG_INTERFACE_H_
+#include "QOMX_JpegExtensions.h"
+#include "cam_intf.h"
+
+#define MM_JPEG_MAX_PLANES 3
+#define MM_JPEG_MAX_BUF CAM_MAX_NUM_BUFS_PER_STREAM
+#define QUANT_SIZE 64
+#define QTABLE_MAX 2
+
+typedef enum {
+  MM_JPEG_FMT_YUV,
+  MM_JPEG_FMT_BITSTREAM
+} mm_jpeg_format_t;
+
+typedef struct {
+  cam_3a_params_t cam_3a_params;
+  uint8_t cam_3a_params_valid;
+  cam_sensor_params_t sensor_params;
+  cam_ae_exif_debug_t ae_debug_params;
+  cam_awb_exif_debug_t awb_debug_params;
+  cam_af_exif_debug_t af_debug_params;
+  cam_asd_exif_debug_t asd_debug_params;
+  cam_stats_buffer_exif_debug_t stats_debug_params;
+  uint8_t ae_debug_params_valid;
+  uint8_t awb_debug_params_valid;
+  uint8_t af_debug_params_valid;
+  uint8_t asd_debug_params_valid;
+  uint8_t stats_debug_params_valid;
+} mm_jpeg_exif_params_t;
+
+typedef struct {
+  uint32_t sequence;          /* for jpeg bit streams, assembling is based on sequence. sequence starts from 0 */
+  uint8_t *buf_vaddr;        /* ptr to buf */
+  int fd;                    /* fd of buf */
+  size_t buf_size;         /* total size of buf (header + image) */
+  mm_jpeg_format_t format;   /* buffer format*/
+  cam_frame_len_offset_t offset; /* offset of all the planes */
+  uint32_t index; /* index used to identify the buffers */
+} mm_jpeg_buf_t;
+
+typedef struct {
+  uint8_t *buf_vaddr;        /* ptr to buf */
+  int fd;                    /* fd of buf */
+  size_t buf_filled_len;   /* used for output image. filled by the client */
+} mm_jpeg_output_t;
+
+typedef enum {
+  MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V2,
+  MM_JPEG_COLOR_FORMAT_YCBCRLP_H2V2,
+  MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V1,
+  MM_JPEG_COLOR_FORMAT_YCBCRLP_H2V1,
+  MM_JPEG_COLOR_FORMAT_YCRCBLP_H1V2,
+  MM_JPEG_COLOR_FORMAT_YCBCRLP_H1V2,
+  MM_JPEG_COLOR_FORMAT_YCRCBLP_H1V1,
+  MM_JPEG_COLOR_FORMAT_YCBCRLP_H1V1,
+  MM_JPEG_COLOR_FORMAT_MONOCHROME,
+  MM_JPEG_COLOR_FORMAT_BITSTREAM_H2V2,
+  MM_JPEG_COLOR_FORMAT_BITSTREAM_H2V1,
+  MM_JPEG_COLOR_FORMAT_BITSTREAM_H1V2,
+  MM_JPEG_COLOR_FORMAT_BITSTREAM_H1V1,
+  MM_JPEG_COLOR_FORMAT_MAX
+} mm_jpeg_color_format;
+
+typedef enum {
+  JPEG_JOB_STATUS_DONE = 0,
+  JPEG_JOB_STATUS_ERROR
+} jpeg_job_status_t;
+
+typedef void (*jpeg_encode_callback_t)(jpeg_job_status_t status,
+  uint32_t client_hdl,
+  uint32_t jobId,
+  mm_jpeg_output_t *p_output,
+  void *userData);
+
+typedef struct {
+  /* src img dimension */
+  cam_dimension_t src_dim;
+
+  /* jpeg output dimension */
+  cam_dimension_t dst_dim;
+
+  /* crop information */
+  cam_rect_t crop;
+} mm_jpeg_dim_t;
+
+typedef struct {
+  /* num of buf in src img */
+  uint32_t num_src_bufs;
+
+  /* num of src tmb bufs */
+  uint32_t num_tmb_bufs;
+
+  /* num of buf in src img */
+  uint32_t num_dst_bufs;
+
+  /* should create thumbnail from main image or not */
+  uint32_t encode_thumbnail;
+
+  /* src img bufs */
+  mm_jpeg_buf_t src_main_buf[MM_JPEG_MAX_BUF];
+
+  /* this will be used only for bitstream */
+  mm_jpeg_buf_t src_thumb_buf[MM_JPEG_MAX_BUF];
+
+  /* this will be used only for bitstream */
+  mm_jpeg_buf_t dest_buf[MM_JPEG_MAX_BUF];
+
+  /* main image color format */
+  mm_jpeg_color_format color_format;
+
+  /* thumbnail color format */
+  mm_jpeg_color_format thumb_color_format;
+
+  /* jpeg quality: range 0~100 */
+  uint32_t quality;
+
+  /* jpeg thumbnail quality: range 0~100 */
+  uint32_t thumb_quality;
+
+  /* buf to exif entries, caller needs to
+   * take care of memory management of the inner pointers */
+  QOMX_EXIF_INFO exif_info;
+
+  /*Callback registered to be called after encode*/
+  jpeg_encode_callback_t jpeg_cb;
+
+  /*Appdata passed by the user*/
+  void* userdata;
+
+  /* thumbnail dimension */
+  mm_jpeg_dim_t thumb_dim;
+
+  /* rotation information */
+  uint32_t rotation;
+
+  /* thumb rotation information */
+  uint32_t thumb_rotation;
+
+  /* main image dimension */
+  mm_jpeg_dim_t main_dim;
+
+  /* enable encoder burst mode */
+  uint32_t burst_mode;
+
+  /* get memory function ptr */
+  int (*get_memory)( omx_jpeg_ouput_buf_t *p_out_buf);
+} mm_jpeg_encode_params_t;
+
+typedef struct {
+  /* num of buf in src img */
+  uint32_t num_src_bufs;
+
+  /* num of buf in src img */
+  uint32_t num_dst_bufs;
+
+  /* src img bufs */
+  mm_jpeg_buf_t src_main_buf[MM_JPEG_MAX_BUF];
+
+  /* this will be used only for bitstream */
+  mm_jpeg_buf_t dest_buf[MM_JPEG_MAX_BUF];
+
+  /* color format */
+  mm_jpeg_color_format color_format;
+
+  jpeg_encode_callback_t jpeg_cb;
+  void* userdata;
+
+} mm_jpeg_decode_params_t;
+
+typedef struct {
+  /* active indices of the buffers for encoding */
+  int32_t src_index;
+  int32_t dst_index;
+  uint32_t thumb_index;
+  mm_jpeg_dim_t thumb_dim;
+
+  /* rotation information */
+  uint32_t rotation;
+
+  /* main image dimension */
+  mm_jpeg_dim_t main_dim;
+
+  /*session id*/
+  uint32_t session_id;
+
+  /*Metadata stream*/
+  metadata_buffer_t *p_metadata;
+
+  /*HAL version*/
+  cam_hal_version_t hal_version;
+
+  /* buf to exif entries, caller needs to
+   * take care of memory management of the inner pointers */
+  QOMX_EXIF_INFO exif_info;
+
+  /* 3a parameters */
+  mm_jpeg_exif_params_t cam_exif_params;
+
+  /* jpeg encoder QTable */
+  uint8_t qtable_set[QTABLE_MAX];
+  OMX_IMAGE_PARAM_QUANTIZATIONTABLETYPE qtable[QTABLE_MAX];
+
+  /* flag to enable/disable mobicat */
+  uint8_t mobicat_mask;
+
+} mm_jpeg_encode_job_t;
+
+typedef struct {
+  /* active indices of the buffers for encoding */
+  int32_t src_index;
+  int32_t dst_index;
+  uint32_t tmb_dst_index;
+
+  /* rotation information */
+  uint32_t rotation;
+
+  /* main image dimension */
+  mm_jpeg_dim_t main_dim;
+
+  /*session id*/
+  uint32_t session_id;
+} mm_jpeg_decode_job_t;
+
+typedef enum {
+  JPEG_JOB_TYPE_ENCODE,
+  JPEG_JOB_TYPE_DECODE,
+  JPEG_JOB_TYPE_MAX
+} mm_jpeg_job_type_t;
+
+typedef struct {
+  mm_jpeg_job_type_t job_type;
+  union {
+    mm_jpeg_encode_job_t encode_job;
+    mm_jpeg_decode_job_t decode_job;
+  };
+} mm_jpeg_job_t;
+
+typedef struct {
+  uint32_t w;
+  uint32_t h;
+} mm_dimension;
+
+typedef struct {
+  /* config a job -- async call */
+  int (*start_job)(mm_jpeg_job_t* job, uint32_t* job_id);
+
+  /* abort a job -- sync call */
+  int (*abort_job)(uint32_t job_id);
+
+  /* create a session */
+  int (*create_session)(uint32_t client_hdl,
+    mm_jpeg_encode_params_t *p_params, uint32_t *p_session_id);
+
+  /* destroy session */
+  int (*destroy_session)(uint32_t session_id);
+
+  /* close a jpeg client -- sync call */
+  int (*close) (uint32_t clientHdl);
+} mm_jpeg_ops_t;
+
+typedef struct {
+  /* config a job -- async call */
+  int (*start_job)(mm_jpeg_job_t* job, uint32_t* job_id);
+
+  /* abort a job -- sync call */
+  int (*abort_job)(uint32_t job_id);
+
+  /* create a session */
+  int (*create_session)(uint32_t client_hdl,
+    mm_jpeg_decode_params_t *p_params, uint32_t *p_session_id);
+
+  /* destroy session */
+  int (*destroy_session)(uint32_t session_id);
+
+  /* close a jpeg client -- sync call */
+  int (*close) (uint32_t clientHdl);
+} mm_jpegdec_ops_t;
+
+/* open a jpeg client -- sync call
+ * returns client_handle.
+ * open failed if client_handle is 0.
+ * jpeg ops tbl will be filled in if open succeeds */
+uint32_t jpeg_open(mm_jpeg_ops_t *ops, mm_dimension picture_size);
+
+/* open a jpeg client -- sync call
+ * returns client_handle.
+ * open failed if client_handle is 0.
+ * jpeg ops tbl will be filled in if open succeeds */
+uint32_t jpegdec_open(mm_jpegdec_ops_t *ops);
+
+#endif /* MM_JPEG_INTERFACE_H_ */
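
As a minimal usage sketch (illustrative only, not code from this patch), the encoder ops table declared above is meant to be driven by opening a client, creating an encode session, submitting a job, and tearing down. The callback body, the pre-filled buffers in params, and the 4032x3024 picture size below are placeholder assumptions.

    #include <string.h>
    #include "mm_jpeg_interface.h"

    /* Placeholder callback; a real client would consume p_output here. */
    static void example_jpeg_cb(jpeg_job_status_t status, uint32_t client_hdl,
            uint32_t jobId, mm_jpeg_output_t *p_output, void *userData)
    {
        (void)status; (void)client_hdl; (void)jobId; (void)p_output; (void)userData;
    }

    /* params is assumed to arrive with src/dst buffers, formats and dims filled in. */
    static int example_encode_once(mm_jpeg_encode_params_t *params)
    {
        mm_jpeg_ops_t ops;
        mm_dimension pic_size = { .w = 4032, .h = 3024 };  /* assumed picture size */
        uint32_t client_hdl = jpeg_open(&ops, pic_size);   /* 0 means open failed */
        if (client_hdl == 0)
            return -1;

        params->jpeg_cb = example_jpeg_cb;
        uint32_t session_id = 0;
        if (ops.create_session(client_hdl, params, &session_id) == 0) {
            mm_jpeg_job_t job;
            memset(&job, 0, sizeof(job));
            job.job_type = JPEG_JOB_TYPE_ENCODE;
            job.encode_job.session_id = session_id;
            job.encode_job.src_index = 0;
            job.encode_job.dst_index = 0;
            job.encode_job.main_dim = params->main_dim;

            uint32_t job_id = 0;
            ops.start_job(&job, &job_id);  /* async; completion arrives via jpeg_cb */
            /* ... wait for the callback before destroying the session ... */
            ops.destroy_session(session_id);
        }
        ops.close(client_hdl);
        return 0;
    }
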
diff --git a/camera/QCamera2/stack/mm-camera-interface/Android.mk b/camera/QCamera2/stack/mm-camera-interface/Android.mk
new file mode 100644
index 0000000..1fc768d
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-interface/Android.mk
@@ -0,0 +1,58 @@
+OLD_LOCAL_PATH := $(LOCAL_PATH)
+LOCAL_PATH := $(call my-dir)
+
+include $(LOCAL_PATH)/../../../common.mk
+include $(CLEAR_VARS)
+
+# Too many clang warnings/errors, see b/23163853.
+LOCAL_CLANG := false
+
+MM_CAM_FILES := \
+        src/mm_camera_interface.c \
+        src/mm_camera.c \
+        src/mm_camera_channel.c \
+        src/mm_camera_stream.c \
+        src/mm_camera_thread.c \
+        src/mm_camera_sock.c
+
+ifeq ($(strip $(TARGET_USES_ION)),true)
+    LOCAL_CFLAGS += -DUSE_ION
+endif
+
+ifneq (,$(filter msm8974 msm8916 msm8226 msm8610 msm8916 apq8084 msm8084 msm8994 msm8992,$(TARGET_BOARD_PLATFORM)))
+    LOCAL_CFLAGS += -DVENUS_PRESENT
+endif
+
+LOCAL_CFLAGS += -D_ANDROID_
+LOCAL_COPY_HEADERS_TO := mm-camera-interface
+LOCAL_COPY_HEADERS += ../common/cam_intf.h
+LOCAL_COPY_HEADERS += ../common/cam_types.h
+
+LOCAL_C_INCLUDES := \
+    $(LOCAL_PATH)/inc \
+    $(LOCAL_PATH)/../common \
+    system/media/camera/include
+
+LOCAL_CFLAGS += -DCAMERA_ION_HEAP_ID=ION_IOMMU_HEAP_ID
+LOCAL_C_INCLUDES+= $(kernel_includes)
+LOCAL_ADDITIONAL_DEPENDENCIES := $(common_deps)
+
+LOCAL_C_INCLUDES += hardware/qcom/media/mm-core/inc
+
+ifneq (1,$(filter 1,$(shell echo "$$(( $(PLATFORM_SDK_VERSION) >= 17 ))" )))
+  LOCAL_CFLAGS += -include bionic/libc/kernel/common/linux/socket.h
+  LOCAL_CFLAGS += -include bionic/libc/kernel/common/linux/un.h
+endif
+LOCAL_CFLAGS += -Wall -Wextra -Werror
+
+LOCAL_SRC_FILES := $(MM_CAM_FILES)
+
+LOCAL_MODULE           := libmmcamera_interface
+LOCAL_PRELINK_MODULE   := false
+LOCAL_SHARED_LIBRARIES := libdl libcutils liblog
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_32_BIT_ONLY := $(BOARD_QTI_CAMERA_32BIT_ONLY)
+include $(BUILD_SHARED_LIBRARY)
+
+LOCAL_PATH := $(OLD_LOCAL_PATH)
diff --git a/camera/QCamera2/stack/mm-camera-interface/inc/mm_camera.h b/camera/QCamera2/stack/mm-camera-interface/inc/mm_camera.h
new file mode 100644
index 0000000..424f6c5
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-interface/inc/mm_camera.h
@@ -0,0 +1,693 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MM_CAMERA_H__
+#define __MM_CAMERA_H__
+
+#include <cam_semaphore.h>
+
+#include "mm_camera_interface.h"
+#include <hardware/camera.h>
+#include <utils/Timers.h>
+
+/**********************************************************************************
+* Data structure declarations
+***********************************************************************************/
+/* num of callbacks allowed for an event type */
+#define MM_CAMERA_EVT_ENTRY_MAX 4
+/* num of data callbacks allowed in a stream obj */
+#define MM_CAMERA_STREAM_BUF_CB_MAX 4
+/* num of data poll threads allowed in a channel obj */
+#define MM_CAMERA_CHANNEL_POLL_THREAD_MAX 1
+
+#define MM_CAMERA_DEV_NAME_LEN 32
+#define MM_CAMERA_DEV_OPEN_TRIES 20
+#define MM_CAMERA_DEV_OPEN_RETRY_SLEEP 20
+#define THREAD_NAME_SIZE 15
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
+
+struct mm_channel;
+struct mm_stream;
+struct mm_camera_obj;
+
+typedef enum
+{
+    MM_CAMERA_CMD_TYPE_DATA_CB,    /* dataCB CMD */
+    MM_CAMERA_CMD_TYPE_EVT_CB,     /* evtCB CMD */
+    MM_CAMERA_CMD_TYPE_EXIT,       /* EXIT */
+    MM_CAMERA_CMD_TYPE_REQ_DATA_CB,/* request data */
+    MM_CAMERA_CMD_TYPE_SUPER_BUF_DATA_CB,    /* superbuf dataCB CMD */
+    MM_CAMERA_CMD_TYPE_CONFIG_NOTIFY, /* configure notify mode */
+    MM_CAMERA_CMD_TYPE_START_ZSL, /* start zsl snapshot for channel */
+    MM_CAMERA_CMD_TYPE_STOP_ZSL, /* stop zsl snapshot for channel */
+    MM_CAMERA_CMD_TYPE_FLUSH_QUEUE, /* flush queue */
+    MM_CAMERA_CMD_TYPE_GENERAL,  /* general cmd */
+    MM_CAMERA_CMD_TYPE_MAX
+} mm_camera_cmdcb_type_t;
+
+typedef struct {
+    uint32_t stream_id;
+    uint32_t frame_idx;
+    uint32_t flags;
+    mm_camera_buf_def_t *buf; /* ref to buf */
+} mm_camera_buf_info_t;
+
+typedef struct {
+    uint32_t num_buf_requested;
+    uint32_t num_retro_buf_requested;
+} mm_camera_req_buf_t;
+
+typedef enum {
+    MM_CAMERA_GENERIC_CMD_TYPE_AE_BRACKETING,
+    MM_CAMERA_GENERIC_CMD_TYPE_AF_BRACKETING,
+    MM_CAMERA_GENERIC_CMD_TYPE_FLASH_BRACKETING,
+    MM_CAMERA_GENERIC_CMD_TYPE_ZOOM_1X,
+    MM_CAMERA_GENERIC_CMD_TYPE_CAPTURE_SETTING,
+} mm_camera_generic_cmd_type_t;
+
+typedef struct {
+    mm_camera_generic_cmd_type_t type;
+    uint32_t payload[32];
+    union {
+        cam_capture_frame_config_t frame_config;
+    };
+} mm_camera_generic_cmd_t;
+
+typedef struct {
+    mm_camera_cmdcb_type_t cmd_type;
+    union {
+        mm_camera_buf_info_t buf;    /* frame buf if dataCB */
+        mm_camera_event_t evt;       /* evt if evtCB */
+        mm_camera_super_buf_t superbuf; /* superbuf if superbuf dataCB*/
+        mm_camera_req_buf_t req_buf; /* num of buf requested */
+        uint32_t frame_idx; /* frame idx boundary for flush superbuf queue*/
+        mm_camera_super_buf_notify_mode_t notify_mode; /* notification mode */
+        mm_camera_generic_cmd_t gen_cmd;
+    } u;
+} mm_camera_cmdcb_t;
+
+typedef void (*mm_camera_cmd_cb_t)(mm_camera_cmdcb_t * cmd_cb, void* user_data);
+
+typedef struct {
+    cam_queue_t cmd_queue; /* cmd queue (queuing dataCB, asyncCB, or exitCMD) */
+    pthread_t cmd_pid;           /* cmd thread ID */
+    cam_semaphore_t cmd_sem;     /* semaphore for cmd thread */
+    mm_camera_cmd_cb_t cb;       /* cb for cmd */
+    void* user_data;             /* user_data for cb */
+    char threadName[THREAD_NAME_SIZE];
+} mm_camera_cmd_thread_t;
+
+typedef enum {
+    MM_CAMERA_POLL_TYPE_EVT,
+    MM_CAMERA_POLL_TYPE_DATA,
+    MM_CAMERA_POLL_TYPE_MAX
+} mm_camera_poll_thread_type_t;
+
+/* function ptr defined for poll notify CB,
+ * registered at poll thread with poll fd */
+typedef void (*mm_camera_poll_notify_t)(void *user_data);
+
+typedef struct {
+    int32_t fd;
+    mm_camera_poll_notify_t notify_cb;
+    uint32_t handler;
+    void* user_data;
+} mm_camera_poll_entry_t;
+
+typedef struct {
+    mm_camera_poll_thread_type_t poll_type;
+    /* array to store poll fd and cb info
+     * for MM_CAMERA_POLL_TYPE_EVT, only index 0 is valid;
+     * for MM_CAMERA_POLL_TYPE_DATA, depends on valid stream fd */
+    mm_camera_poll_entry_t poll_entries[MAX_STREAM_NUM_IN_BUNDLE];
+    int32_t pfds[2];
+    pthread_t pid;
+    int32_t state;
+    int timeoutms;
+    uint32_t cmd;
+    struct pollfd poll_fds[MAX_STREAM_NUM_IN_BUNDLE + 1];
+    uint8_t num_fds;
+    pthread_mutex_t mutex;
+    pthread_cond_t cond_v;
+    int32_t status;
+    char threadName[THREAD_NAME_SIZE];
+    //void *my_obj;
+} mm_camera_poll_thread_t;
+
+/* mm_stream */
+typedef enum {
+    MM_STREAM_STATE_NOTUSED = 0,      /* not used */
+    MM_STREAM_STATE_INITED,           /* inited  */
+    MM_STREAM_STATE_ACQUIRED,         /* acquired, fd opened  */
+    MM_STREAM_STATE_CFG,              /* fmt & dim configured */
+    MM_STREAM_STATE_BUFFED,           /* buf allocated */
+    MM_STREAM_STATE_REG,              /* buf regged, stream off */
+    MM_STREAM_STATE_ACTIVE,           /* active */
+    MM_STREAM_STATE_MAX
+} mm_stream_state_type_t;
+
+typedef enum {
+    MM_STREAM_EVT_ACQUIRE,
+    MM_STREAM_EVT_RELEASE,
+    MM_STREAM_EVT_SET_FMT,
+    MM_STREAM_EVT_GET_BUF,
+    MM_STREAM_EVT_PUT_BUF,
+    MM_STREAM_EVT_REG_BUF,
+    MM_STREAM_EVT_UNREG_BUF,
+    MM_STREAM_EVT_START,
+    MM_STREAM_EVT_STOP,
+    MM_STREAM_EVT_QBUF,
+    MM_STREAM_EVT_SET_PARM,
+    MM_STREAM_EVT_GET_PARM,
+    MM_STREAM_EVT_DO_ACTION,
+    MM_STREAM_EVT_GET_QUEUED_BUF_COUNT,
+    MM_STREAM_EVT_MAX
+} mm_stream_evt_type_t;
+
+typedef struct {
+    mm_camera_buf_notify_t cb;
+    void *user_data;
+    /* cb_count = -1: infinite
+     * cb_count > 0: register only for required times */
+    int8_t cb_count;
+} mm_stream_data_cb_t;
+
+typedef struct {
+    /* buf reference count */
+    uint8_t buf_refcnt;
+
+    /* This flag indicates whether, after allocation,
+     * the corresponding buf needs to be queued (qbuf) into the kernel
+     * (e.g. for the preview use case, the display needs to hold two bufs,
+     * so there is no need to qbuf those two bufs initially) */
+    uint8_t initial_reg_flag;
+
+    /* indicate if buf is in kernel(1) or client(0) */
+    uint8_t in_kernel;
+} mm_stream_buf_status_t;
+
+typedef struct mm_stream {
+    uint32_t my_hdl; /* local stream id */
+    uint32_t server_stream_id; /* stream id from server */
+    int32_t fd;
+    mm_stream_state_type_t state;
+
+    /* stream info*/
+    cam_stream_info_t *stream_info;
+
+    /* padding info */
+    cam_padding_info_t padding_info;
+
+    /* offset */
+    cam_frame_len_offset_t frame_offset;
+
+    mm_camera_cmd_thread_t cmd_thread;
+
+    /* dataCB registered on this stream obj */
+    pthread_mutex_t cb_lock; /* cb lock to protect buf_cb */
+    mm_stream_data_cb_t buf_cb[MM_CAMERA_STREAM_BUF_CB_MAX];
+
+    /* stream buffer management */
+    pthread_mutex_t buf_lock;
+    uint8_t buf_num; /* num of buffers allocated */
+    mm_camera_buf_def_t* buf; /* ptr to buf array */
+    mm_stream_buf_status_t* buf_status; /* ptr to buf status array */
+
+    uint8_t plane_buf_num; /* num of plane buffers allocated; used only in batch mode */
+    mm_camera_buf_def_t *plane_buf; /* pointer to plane buffer array; used only in batch mode */
+    int32_t cur_buf_idx; /* container buffer currently being filled; used only in batch mode */
+    uint8_t cur_bufs_staged; /* number of plane bufs freed by HAL for this user buf */
+
+
+    /* reference to parent channel_obj */
+    struct mm_channel* ch_obj;
+
+    uint8_t is_bundled; /* flag if stream is bundled */
+
+    /* reference to linked channel_obj */
+    struct mm_channel* linked_obj;
+    struct mm_stream * linked_stream; /* original stream */
+    uint8_t is_linked; /* flag if stream is linked */
+
+    mm_camera_stream_mem_vtbl_t mem_vtbl; /* mem ops tbl */
+
+    mm_camera_map_unmap_ops_tbl_t map_ops;
+
+    int8_t queued_buffer_count;
+
+    /* frame ID and timestamp of the latest frame received on this stream */
+    uint32_t prev_frameID;
+    nsecs_t prev_timestamp;
+} mm_stream_t;
+
+/* mm_channel */
+typedef enum {
+    MM_CHANNEL_STATE_NOTUSED = 0,   /* not used */
+    MM_CHANNEL_STATE_STOPPED,       /* stopped */
+    MM_CHANNEL_STATE_ACTIVE,        /* active, at least one stream active */
+    MM_CHANNEL_STATE_PAUSED,        /* paused */
+    MM_CHANNEL_STATE_MAX
+} mm_channel_state_type_t;
+
+typedef enum {
+    MM_CHANNEL_EVT_ADD_STREAM,
+    MM_CHANNEL_EVT_DEL_STREAM,
+    MM_CHANNEL_EVT_LINK_STREAM,
+    MM_CHANNEL_EVT_CONFIG_STREAM,
+    MM_CHANNEL_EVT_GET_BUNDLE_INFO,
+    MM_CHANNEL_EVT_START,
+    MM_CHANNEL_EVT_STOP,
+    MM_CHANNEL_EVT_PAUSE,
+    MM_CHANNEL_EVT_RESUME,
+    MM_CHANNEL_EVT_REQUEST_SUPER_BUF,
+    MM_CHANNEL_EVT_CANCEL_REQUEST_SUPER_BUF,
+    MM_CHANNEL_EVT_FLUSH_SUPER_BUF_QUEUE,
+    MM_CHANNEL_EVT_CONFIG_NOTIFY_MODE,
+    MM_CHANNEL_EVT_START_ZSL_SNAPSHOT,
+    MM_CHANNEL_EVT_STOP_ZSL_SNAPSHOT,
+    MM_CHANNEL_EVT_MAP_STREAM_BUF,
+    MM_CHANNEL_EVT_UNMAP_STREAM_BUF,
+    MM_CHANNEL_EVT_SET_STREAM_PARM,
+    MM_CHANNEL_EVT_GET_STREAM_PARM,
+    MM_CHANNEL_EVT_DO_STREAM_ACTION,
+    MM_CHANNEL_EVT_DELETE,
+    MM_CHANNEL_EVT_AF_BRACKETING,
+    MM_CHANNEL_EVT_AE_BRACKETING,
+    MM_CHANNEL_EVT_FLASH_BRACKETING,
+    MM_CHANNEL_EVT_ZOOM_1X,
+    MM_CAMERA_EVT_CAPTURE_SETTING,
+    MM_CHANNEL_EVT_GET_STREAM_QUEUED_BUF_COUNT,
+} mm_channel_evt_type_t;
+
+typedef struct {
+    uint32_t stream_id;
+    mm_camera_stream_config_t *config;
+} mm_evt_paylod_config_stream_t;
+
+typedef struct {
+    uint32_t stream_id;
+    cam_stream_parm_buffer_t *parms;
+} mm_evt_paylod_set_get_stream_parms_t;
+
+typedef struct {
+    uint32_t stream_id;
+    void *actions;
+} mm_evt_paylod_do_stream_action_t;
+
+typedef struct {
+    uint32_t stream_id;
+    uint8_t buf_type;
+    uint32_t buf_idx;
+    int32_t plane_idx;
+    int fd;
+    size_t size;
+} mm_evt_paylod_map_stream_buf_t;
+
+typedef struct {
+    uint32_t stream_id;
+    uint8_t buf_type;
+    uint32_t buf_idx;
+    int32_t plane_idx;
+} mm_evt_paylod_unmap_stream_buf_t;
+
+typedef struct {
+    uint8_t num_of_bufs;
+    mm_camera_buf_info_t super_buf[MAX_STREAM_NUM_IN_BUNDLE];
+    uint8_t matched;
+    uint8_t expected;
+    uint32_t frame_idx;
+} mm_channel_queue_node_t;
+
+typedef struct {
+    cam_queue_t que;
+    uint8_t num_streams;
+    /* container for bundled stream handlers */
+    uint32_t bundled_streams[MAX_STREAM_NUM_IN_BUNDLE];
+    mm_camera_channel_attr_t attr;
+    uint32_t expected_frame_id;
+    uint32_t match_cnt;
+    uint32_t expected_frame_id_without_led;
+    uint32_t led_on_start_frame_id;
+    uint32_t led_off_start_frame_id;
+    uint32_t led_on_num_frames;
+    uint32_t once;
+    uint32_t frame_skip_count;
+    uint32_t nomatch_frame_id;
+} mm_channel_queue_t;
+
+typedef struct {
+    uint8_t is_active; /* flag to indicate if bundle is valid */
+    /* queue to store bundled super buffers */
+    mm_channel_queue_t superbuf_queue;
+    mm_camera_buf_notify_t super_buf_notify_cb;
+    void *user_data;
+} mm_channel_bundle_t;
+
+typedef enum {
+    MM_CHANNEL_BRACKETING_STATE_OFF,
+    MM_CHANNEL_BRACKETING_STATE_WAIT_GOOD_FRAME_IDX,
+    MM_CHANNEL_BRACKETING_STATE_ACTIVE,
+} mm_channel_bracketing_state_t;
+
+typedef struct mm_channel {
+    uint32_t my_hdl;
+    mm_channel_state_type_t state;
+    pthread_mutex_t ch_lock; /* channel lock */
+
+    /* stream bundle info in the channel */
+    mm_channel_bundle_t bundle;
+
+    /* num of pending superbuffers */
+    uint32_t pending_cnt;
+    uint32_t pending_retro_cnt;
+    uint32_t bWaitForPrepSnapshotDone;
+    uint32_t unLockAEC;
+    /* flag to stop ZSL snapshot */
+    uint8_t stopZslSnapshot;
+
+    /* cmd thread for superbuffer dataCB and async stop*/
+    mm_camera_cmd_thread_t cmd_thread;
+
+    /* cb thread for sending data cb */
+    mm_camera_cmd_thread_t cb_thread;
+
+    /* data poll thread
+    * currently one data poll thread per channel
+    * could be extended to support one data poll thread per stream in the channel */
+    mm_camera_poll_thread_t poll_thread[MM_CAMERA_CHANNEL_POLL_THREAD_MAX];
+
+    /* container for all streams in channel */
+    mm_stream_t streams[MAX_STREAM_NUM_IN_BUNDLE];
+
+    /* reference to parent cam_obj */
+    struct mm_camera_obj* cam_obj;
+
+    /* manual zsl snapshot control */
+    uint8_t manualZSLSnapshot;
+
+    /* control for zsl led */
+    uint8_t startZSlSnapshotCalled;
+    uint8_t needLEDFlash;
+    mm_channel_bracketing_state_t bracketingState;
+    uint8_t isFlashBracketingEnabled;
+    uint8_t isZoom1xFrameRequested;
+    uint32_t burstSnapNum;
+    char threadName[THREAD_NAME_SIZE];
+
+    /*Buffer diverted*/
+    uint8_t diverted_frame_id;
+
+    /* Frame capture configuration */
+    uint8_t cur_capture_idx;
+    cam_capture_frame_config_t *frame_config;
+} mm_channel_t;
+
+typedef struct {
+    mm_channel_t *ch;
+    uint32_t stream_id;
+} mm_camera_stream_link_t;
+
+/* struct to store information about pp cookie*/
+typedef struct {
+    uint32_t cam_hdl;
+    uint32_t ch_hdl;
+    uint32_t stream_hdl;
+    mm_channel_queue_node_t* super_buf;
+} mm_channel_pp_info_t;
+
+/* mm_camera */
+typedef struct {
+    mm_camera_event_notify_t evt_cb;
+    void *user_data;
+} mm_camera_evt_entry_t;
+
+typedef struct {
+    mm_camera_evt_entry_t evt[MM_CAMERA_EVT_ENTRY_MAX];
+    /* reg_count <=0: infinite
+     * reg_count > 0: register only for required times */
+    int reg_count;
+} mm_camera_evt_obj_t;
+
+typedef struct mm_camera_obj {
+    uint32_t my_hdl;
+    int ref_count;
+    int32_t ctrl_fd;
+    int32_t ds_fd; /* domain socket fd */
+    pthread_mutex_t cam_lock;
+    pthread_mutex_t cb_lock; /* lock for evt cb */
+    mm_channel_t ch[MM_CAMERA_CHANNEL_MAX];
+    mm_camera_evt_obj_t evt;
+    mm_camera_poll_thread_t evt_poll_thread; /* evt poll thread */
+    mm_camera_cmd_thread_t evt_thread;       /* thread for evt CB */
+    mm_camera_vtbl_t vtbl;
+
+    pthread_mutex_t evt_lock;
+    pthread_cond_t evt_cond;
+    mm_camera_event_t evt_rcvd;
+
+    pthread_mutex_t msg_lock; /* lock for sending msg through socket */
+} mm_camera_obj_t;
+
+typedef struct {
+    int8_t num_cam;
+    char video_dev_name[MM_CAMERA_MAX_NUM_SENSORS][MM_CAMERA_DEV_NAME_LEN];
+    mm_camera_obj_t *cam_obj[MM_CAMERA_MAX_NUM_SENSORS];
+    struct camera_info info[MM_CAMERA_MAX_NUM_SENSORS];
+} mm_camera_ctrl_t;
+
+typedef enum {
+    mm_camera_async_call,
+    mm_camera_sync_call
+} mm_camera_call_type_t;
+
+/**********************************************************************************
+* external function declarations
+***********************************************************************************/
+/* utility functions */
+/* set int32_t value */
+extern int32_t mm_camera_util_s_ctrl(int32_t fd,
+                                     uint32_t id,
+                                     int32_t *value);
+
+/* get int32_t value */
+extern int32_t mm_camera_util_g_ctrl(int32_t fd,
+                                     uint32_t id,
+                                     int32_t *value);
+
+/* send msg through domain socket for fd mapping */
+extern int32_t mm_camera_util_sendmsg(mm_camera_obj_t *my_obj,
+                                      void *msg,
+                                      size_t buf_size,
+                                      int sendfd);
+/* Check if hardware target is A family */
+uint8_t mm_camera_util_chip_is_a_family(void);
+
+/* mm-camera */
+extern int32_t mm_camera_open(mm_camera_obj_t *my_obj);
+extern int32_t mm_camera_close(mm_camera_obj_t *my_obj);
+extern int32_t mm_camera_close_fd(mm_camera_obj_t *my_obj);
+extern int32_t mm_camera_register_event_notify(mm_camera_obj_t *my_obj,
+                                               mm_camera_event_notify_t evt_cb,
+                                               void * user_data);
+extern int32_t mm_camera_qbuf(mm_camera_obj_t *my_obj,
+                              uint32_t ch_id,
+                              mm_camera_buf_def_t *buf);
+extern int32_t mm_camera_get_queued_buf_count(mm_camera_obj_t *my_obj,
+        uint32_t ch_id, uint32_t stream_id);
+extern int32_t mm_camera_query_capability(mm_camera_obj_t *my_obj);
+extern int32_t mm_camera_set_parms(mm_camera_obj_t *my_obj,
+                                   parm_buffer_t *parms);
+extern int32_t mm_camera_get_parms(mm_camera_obj_t *my_obj,
+                                   parm_buffer_t *parms);
+extern int32_t mm_camera_map_buf(mm_camera_obj_t *my_obj,
+                                 uint8_t buf_type,
+                                 int fd,
+                                 size_t size);
+extern int32_t mm_camera_unmap_buf(mm_camera_obj_t *my_obj,
+                                   uint8_t buf_type);
+extern int32_t mm_camera_do_auto_focus(mm_camera_obj_t *my_obj);
+extern int32_t mm_camera_cancel_auto_focus(mm_camera_obj_t *my_obj);
+extern int32_t mm_camera_prepare_snapshot(mm_camera_obj_t *my_obj,
+                                          int32_t do_af_flag);
+extern int32_t mm_camera_start_zsl_snapshot(mm_camera_obj_t *my_obj);
+extern int32_t mm_camera_stop_zsl_snapshot(mm_camera_obj_t *my_obj);
+extern int32_t mm_camera_start_zsl_snapshot_ch(mm_camera_obj_t *my_obj,
+        uint32_t ch_id);
+extern int32_t mm_camera_stop_zsl_snapshot_ch(mm_camera_obj_t *my_obj,
+        uint32_t ch_id);
+extern uint32_t mm_camera_add_channel(mm_camera_obj_t *my_obj,
+                                      mm_camera_channel_attr_t *attr,
+                                      mm_camera_buf_notify_t channel_cb,
+                                      void *userdata);
+extern int32_t mm_camera_del_channel(mm_camera_obj_t *my_obj,
+                                     uint32_t ch_id);
+extern int32_t mm_camera_get_bundle_info(mm_camera_obj_t *my_obj,
+                                         uint32_t ch_id,
+                                         cam_bundle_config_t *bundle_info);
+extern uint32_t mm_camera_add_stream(mm_camera_obj_t *my_obj,
+                                     uint32_t ch_id);
+extern int32_t mm_camera_del_stream(mm_camera_obj_t *my_obj,
+                                    uint32_t ch_id,
+                                    uint32_t stream_id);
+extern uint32_t mm_camera_link_stream(mm_camera_obj_t *my_obj,
+        uint32_t ch_id,
+        uint32_t stream_id,
+        uint32_t linked_ch_id);
+extern int32_t mm_camera_config_stream(mm_camera_obj_t *my_obj,
+                                       uint32_t ch_id,
+                                       uint32_t stream_id,
+                                       mm_camera_stream_config_t *config);
+extern int32_t mm_camera_start_channel(mm_camera_obj_t *my_obj,
+                                       uint32_t ch_id);
+extern int32_t mm_camera_stop_channel(mm_camera_obj_t *my_obj,
+                                      uint32_t ch_id);
+extern int32_t mm_camera_request_super_buf(mm_camera_obj_t *my_obj,
+                                           uint32_t ch_id,
+                                           uint32_t num_buf_requested,
+                                           uint32_t num_retro_buf_requested);
+extern int32_t mm_camera_cancel_super_buf_request(mm_camera_obj_t *my_obj,
+                                                  uint32_t ch_id);
+extern int32_t mm_camera_flush_super_buf_queue(mm_camera_obj_t *my_obj,
+                                               uint32_t ch_id,
+                                               uint32_t frame_idx);
+extern int32_t mm_camera_config_channel_notify(mm_camera_obj_t *my_obj,
+                                               uint32_t ch_id,
+                                               mm_camera_super_buf_notify_mode_t notify_mode);
+extern int32_t mm_camera_set_stream_parms(mm_camera_obj_t *my_obj,
+                                          uint32_t ch_id,
+                                          uint32_t s_id,
+                                          cam_stream_parm_buffer_t *parms);
+extern int32_t mm_camera_get_stream_parms(mm_camera_obj_t *my_obj,
+                                          uint32_t ch_id,
+                                          uint32_t s_id,
+                                          cam_stream_parm_buffer_t *parms);
+extern int32_t mm_camera_register_event_notify_internal(mm_camera_obj_t *my_obj,
+                                                        mm_camera_event_notify_t evt_cb,
+                                                        void * user_data);
+extern int32_t mm_camera_map_stream_buf(mm_camera_obj_t *my_obj,
+                                        uint32_t ch_id,
+                                        uint32_t stream_id,
+                                        uint8_t buf_type,
+                                        uint32_t buf_idx,
+                                        int32_t plane_idx,
+                                        int fd,
+                                        size_t size);
+extern int32_t mm_camera_unmap_stream_buf(mm_camera_obj_t *my_obj,
+                                          uint32_t ch_id,
+                                          uint32_t stream_id,
+                                          uint8_t buf_type,
+                                          uint32_t buf_idx,
+                                          int32_t plane_idx);
+extern int32_t mm_camera_do_stream_action(mm_camera_obj_t *my_obj,
+                                          uint32_t ch_id,
+                                          uint32_t stream_id,
+                                          void *actions);
+
+/* mm_channel */
+extern int32_t mm_channel_fsm_fn(mm_channel_t *my_obj,
+                                 mm_channel_evt_type_t evt,
+                                 void * in_val,
+                                 void * out_val);
+extern int32_t mm_channel_init(mm_channel_t *my_obj,
+                               mm_camera_channel_attr_t *attr,
+                               mm_camera_buf_notify_t channel_cb,
+                               void *userdata);
+/* qbuf is a special case that does not go through the state machine.
+ * This avoids a deadlock when trying to acquire ch_lock from the
+ * context of a dataCB while an async stop is holding ch_lock */
+extern int32_t mm_channel_qbuf(mm_channel_t *my_obj,
+                               mm_camera_buf_def_t *buf);
+/* mm_stream */
+extern int32_t mm_stream_fsm_fn(mm_stream_t *my_obj,
+                                mm_stream_evt_type_t evt,
+                                void * in_val,
+                                void * out_val);
+/* Allow another stream to register a dataCB on a given stream.
+ * This is for the video-sized live snapshot use case,
+ * because the snapshot stream needs to register a one-time CB on the video stream.
+ * ext_image_mode and sensor_idx are used to identify the destination stream
+ * to register the dataCB with. */
+extern int32_t mm_stream_reg_buf_cb(mm_stream_t *my_obj,
+                                    mm_stream_data_cb_t *val);
+extern int32_t mm_stream_map_buf(mm_stream_t *my_obj,
+                                 uint8_t buf_type,
+                                 uint32_t frame_idx,
+                                 int32_t plane_idx,
+                                 int fd,
+                                 size_t size);
+extern int32_t mm_stream_unmap_buf(mm_stream_t *my_obj,
+                                   uint8_t buf_type,
+                                   uint32_t frame_idx,
+                                   int32_t plane_idx);
+
+
+/* utility functions declared in mm-camera-interface2.c
+ * and needed by mm-camera and below */
+uint32_t mm_camera_util_generate_handler(uint8_t index);
+const char * mm_camera_util_get_dev_name(uint32_t cam_handler);
+uint8_t mm_camera_util_get_index_by_handler(uint32_t handler);
+
+/* poll/cmd thread functions */
+extern int32_t mm_camera_poll_thread_launch(
+                                mm_camera_poll_thread_t * poll_cb,
+                                mm_camera_poll_thread_type_t poll_type);
+extern int32_t mm_camera_poll_thread_release(mm_camera_poll_thread_t *poll_cb);
+extern int32_t mm_camera_poll_thread_add_poll_fd(
+                                mm_camera_poll_thread_t * poll_cb,
+                                uint32_t handler,
+                                int32_t fd,
+                                mm_camera_poll_notify_t notify_cb,
+                                void *userdata,
+                                mm_camera_call_type_t);
+extern int32_t mm_camera_poll_thread_del_poll_fd(
+                                mm_camera_poll_thread_t * poll_cb,
+                                uint32_t handler,
+                                mm_camera_call_type_t);
+extern int32_t mm_camera_poll_thread_commit_updates(
+        mm_camera_poll_thread_t * poll_cb);
+extern int32_t mm_camera_cmd_thread_launch(
+                                mm_camera_cmd_thread_t * cmd_thread,
+                                mm_camera_cmd_cb_t cb,
+                                void* user_data);
+extern int32_t mm_camera_cmd_thread_name(const char* name);
+extern int32_t mm_camera_cmd_thread_release(mm_camera_cmd_thread_t * cmd_thread);
+
+extern int32_t mm_camera_channel_advanced_capture(mm_camera_obj_t *my_obj,
+        uint32_t ch_id, mm_camera_advanced_capture_t type,
+        uint32_t trigger, void *in_value);
+#endif /* __MM_CAMERA_H__ */
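
As a minimal usage sketch (illustrative only, not code from this patch), the cmd-thread declarations above are intended to be used by handing mm_camera_cmd_thread_launch a callback that dispatches on cmd_type. The function and variable names below are made up and the dispatch bodies are left empty.

    #include "mm_camera.h"

    /* Illustrative dispatch callback: the cmd thread is expected to invoke this
     * for each mm_camera_cmdcb_t popped from cmd_thread->cmd_queue. */
    static void example_cmd_cb(mm_camera_cmdcb_t *cmd_cb, void *user_data)
    {
        (void)user_data;
        switch (cmd_cb->cmd_type) {
        case MM_CAMERA_CMD_TYPE_DATA_CB:
            /* cmd_cb->u.buf carries the mm_camera_buf_info_t for one frame */
            break;
        case MM_CAMERA_CMD_TYPE_EVT_CB:
            /* cmd_cb->u.evt carries the mm_camera_event_t */
            break;
        default:
            break;
        }
    }

    static int32_t example_start_worker(mm_camera_cmd_thread_t *thread, void *ctx)
    {
        /* Spawn the worker; it presumably runs until an EXIT command is queued
         * and is torn down with mm_camera_cmd_thread_release(). */
        return mm_camera_cmd_thread_launch(thread, example_cmd_cb, ctx);
    }
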
diff --git a/camera/QCamera2/stack/mm-camera-interface/inc/mm_camera_dbg.h b/camera/QCamera2/stack/mm-camera-interface/inc/mm_camera_dbg.h
new file mode 100755
index 0000000..5953006
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-interface/inc/mm_camera_dbg.h
@@ -0,0 +1,82 @@
+/* Copyright (c) 2012, 2014, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MM_CAMERA_DBG_H__
+#define __MM_CAMERA_DBG_H__
+
+#define LOG_DEBUG 1
+/* Choose the debug log level. This does not affect the error logs.
+   0: turns off CDBG and CDBG_HIGH logs
+   1: turns on CDBG_HIGH logs
+   2: turns on CDBG_HIGH and CDBG logs */
+extern volatile uint32_t gMmCameraIntfLogLevel;
+
+#ifndef LOG_DEBUG
+  #ifdef _ANDROID_
+    #undef LOG_NIDEBUG
+    #undef LOG_TAG
+    #define LOG_NIDEBUG 0
+    #define LOG_TAG "mm-camera-intf"
+    #include <utils/Log.h>
+  #else
+    #include <stdio.h>
+    #define ALOGE CDBG
+  #endif
+  #undef CDBG
+  #define CDBG(fmt, args...) do{}while(0)
+  #define CDBG_ERROR(fmt, args...) ALOGE(fmt, ##args)
+#else
+  #ifdef _ANDROID_
+    #undef LOG_NIDEBUG
+    #undef LOG_TAG
+    #define LOG_NIDEBUG 0
+    #define LOG_TAG "mm-camera-intf"
+    #include <utils/Log.h>
+    #define CDBG(fmt, args...) ALOGD_IF(gMmCameraIntfLogLevel >= 2, fmt, ##args)
+  #else
+    #include <stdio.h>
+    #define CDBG(fmt, args...) fprintf(stderr, fmt, ##args)
+    #define ALOGE(fmt, args...) fprintf(stderr, fmt, ##args)
+  #endif
+#endif
+
+#ifdef _ANDROID_
+  #define CDBG_HIGH(fmt, args...) ALOGD_IF(gMmCameraIntfLogLevel >= 1, fmt, ##args)
+  #define CDBG_ERROR(fmt, args...)  ALOGE(fmt, ##args)
+#else
+  #define CDBG_HIGH(fmt, args...) fprintf(stderr, fmt, ##args)
+  #define CDBG_ERROR(fmt, args...) fprintf(stderr, fmt, ##args)
+#endif
+
+#ifdef _ANDROID_
+  #define CDBG_FATAL_IF(cond, ...) LOG_ALWAYS_FATAL_IF(cond, ## __VA_ARGS__)
+  #define CDBG_FATAL(...) LOG_ALWAYS_FATAL(__VA_ARGS__)
+#endif
+
+#endif /* __MM_CAMERA_DBG_H__ */
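
Concretely, the level gating above means CDBG fires only when gMmCameraIntfLogLevel is at least 2, CDBG_HIGH when it is at least 1, and CDBG_ERROR unconditionally. A small illustrative example (function name and messages are made up):

    #include "mm_camera_dbg.h"

    /* Illustrative use of the three verbosity tiers. */
    static void example_log_levels(int rc)
    {
        CDBG("%s: verbose trace, rc=%d", __func__, rc);    /* needs gMmCameraIntfLogLevel >= 2 */
        CDBG_HIGH("%s: stream configured", __func__);      /* needs gMmCameraIntfLogLevel >= 1 */
        if (rc < 0)
            CDBG_ERROR("%s: failed, rc=%d", __func__, rc); /* logged regardless of level */
    }
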
diff --git a/camera/QCamera2/stack/mm-camera-interface/inc/mm_camera_sock.h b/camera/QCamera2/stack/mm-camera-interface/inc/mm_camera_sock.h
new file mode 100755
index 0000000..e6f42be
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-interface/inc/mm_camera_sock.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MM_CAMERA_SOCKET_H__
+#define __MM_CAMERA_SOCKET_H__
+
+#include <inttypes.h>
+#include <sys/socket.h>
+#include <sys/uio.h>
+#include <sys/un.h>
+
+typedef enum {
+    MM_CAMERA_SOCK_TYPE_UDP,
+    MM_CAMERA_SOCK_TYPE_TCP,
+} mm_camera_sock_type_t;
+
+typedef union {
+    struct sockaddr addr;
+    struct sockaddr_un addr_un;
+} mm_camera_sock_addr_t;
+
+int mm_camera_socket_create(int cam_id, mm_camera_sock_type_t sock_type);
+
+int mm_camera_socket_sendmsg(
+  int fd,
+  void *msg,
+  size_t buf_size,
+  int sendfd);
+
+int mm_camera_socket_recvmsg(
+  int fd,
+  void *msg,
+  uint32_t buf_size,
+  int *rcvdfd);
+
+void mm_camera_socket_close(int fd);
+
+#endif /*__MM_CAMERA_SOCKET_H__*/
+
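A hypothetical round trip over the per-camera domain socket, based only on the prototypes above (the request payload, cam_id, and buffer fd are placeholders, and a positive return value is assumed to mean success):

    #include "mm_camera_sock.h"

    /* Hypothetical fd-mapping exchange with the camera daemon. */
    static int example_map_buf(int cam_id, void *msg, size_t msg_size, int buf_fd)
    {
        int sock_fd = mm_camera_socket_create(cam_id, MM_CAMERA_SOCK_TYPE_UDP);
        if (sock_fd < 0)
            return -1;

        /* Send the request and hand the buffer fd across the domain socket. */
        int rc = mm_camera_socket_sendmsg(sock_fd, msg, msg_size, buf_fd);
        if (rc > 0) {
            int rcvd_fd = -1;
            /* Wait for the daemon's reply on the same socket. */
            rc = mm_camera_socket_recvmsg(sock_fd, msg, (uint32_t)msg_size, &rcvd_fd);
        }
        mm_camera_socket_close(sock_fd);
        return (rc > 0) ? 0 : -1;
    }
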
diff --git a/camera/QCamera2/stack/mm-camera-interface/src/cam_intf.c b/camera/QCamera2/stack/mm-camera-interface/src/cam_intf.c
new file mode 100644
index 0000000..42ef20f
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-interface/src/cam_intf.c
@@ -0,0 +1,729 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "cam_intf.h"
+
+void *get_pointer_of(cam_intf_parm_type_t meta_id,
+        const metadata_buffer_t* metadata)
+{
+    switch(meta_id) {
+        case CAM_INTF_META_HISTOGRAM:
+            return POINTER_OF_META(CAM_INTF_META_HISTOGRAM, metadata);
+        case CAM_INTF_META_FACE_DETECTION:
+            return POINTER_OF_META(CAM_INTF_META_FACE_DETECTION, metadata);
+        case CAM_INTF_META_AUTOFOCUS_DATA:
+            return POINTER_OF_META(CAM_INTF_META_AUTOFOCUS_DATA, metadata);
+        case CAM_INTF_PARM_UPDATE_DEBUG_LEVEL:
+            return POINTER_OF_META(CAM_INTF_PARM_UPDATE_DEBUG_LEVEL, metadata);
+        case CAM_INTF_META_CROP_DATA:
+            return POINTER_OF_META(CAM_INTF_META_CROP_DATA, metadata);
+        case CAM_INTF_META_PREP_SNAPSHOT_DONE:
+            return POINTER_OF_META(CAM_INTF_META_PREP_SNAPSHOT_DONE, metadata);
+        case CAM_INTF_META_GOOD_FRAME_IDX_RANGE:
+            return POINTER_OF_META(CAM_INTF_META_GOOD_FRAME_IDX_RANGE, metadata);
+        case CAM_INTF_META_ASD_HDR_SCENE_DATA:
+            return POINTER_OF_META(CAM_INTF_META_ASD_HDR_SCENE_DATA, metadata);
+        case CAM_INTF_META_ASD_SCENE_TYPE:
+            return POINTER_OF_META(CAM_INTF_META_ASD_SCENE_TYPE, metadata);
+        case CAM_INTF_META_CHROMATIX_LITE_ISP:
+            return POINTER_OF_META(CAM_INTF_META_CHROMATIX_LITE_ISP, metadata);
+        case CAM_INTF_META_CHROMATIX_LITE_PP:
+            return POINTER_OF_META(CAM_INTF_META_CHROMATIX_LITE_PP, metadata);
+        case CAM_INTF_META_CHROMATIX_LITE_AE:
+            return POINTER_OF_META(CAM_INTF_META_CHROMATIX_LITE_AE, metadata);
+        case CAM_INTF_META_CHROMATIX_LITE_AWB:
+            return POINTER_OF_META(CAM_INTF_META_CHROMATIX_LITE_AWB, metadata);
+        case CAM_INTF_META_CHROMATIX_LITE_AF:
+            return POINTER_OF_META(CAM_INTF_META_CHROMATIX_LITE_AF, metadata);
+        case CAM_INTF_META_CHROMATIX_LITE_ASD:
+            return POINTER_OF_META(CAM_INTF_META_CHROMATIX_LITE_ASD, metadata);
+        case CAM_INTF_BUF_DIVERT_INFO:
+            return POINTER_OF_META(CAM_INTF_BUF_DIVERT_INFO, metadata);
+        case CAM_INTF_META_FRAME_NUMBER_VALID:
+            return POINTER_OF_META(CAM_INTF_META_FRAME_NUMBER_VALID, metadata);
+        case CAM_INTF_META_URGENT_FRAME_NUMBER_VALID:
+            return POINTER_OF_META(CAM_INTF_META_URGENT_FRAME_NUMBER_VALID, metadata);
+        case CAM_INTF_META_FRAME_DROPPED:
+            return POINTER_OF_META(CAM_INTF_META_FRAME_DROPPED, metadata);
+        case CAM_INTF_META_FRAME_NUMBER:
+            return POINTER_OF_META(CAM_INTF_META_FRAME_NUMBER, metadata);
+        case CAM_INTF_META_URGENT_FRAME_NUMBER:
+            return POINTER_OF_META(CAM_INTF_META_URGENT_FRAME_NUMBER, metadata);
+        case CAM_INTF_META_COLOR_CORRECT_MODE:
+            return POINTER_OF_META(CAM_INTF_META_COLOR_CORRECT_MODE, metadata);
+        case CAM_INTF_META_COLOR_CORRECT_TRANSFORM:
+            return POINTER_OF_META(CAM_INTF_META_COLOR_CORRECT_TRANSFORM, metadata);
+        case CAM_INTF_META_COLOR_CORRECT_GAINS:
+            return POINTER_OF_META(CAM_INTF_META_COLOR_CORRECT_GAINS, metadata);
+        case CAM_INTF_META_PRED_COLOR_CORRECT_TRANSFORM:
+            return POINTER_OF_META(CAM_INTF_META_PRED_COLOR_CORRECT_TRANSFORM, metadata);
+        case CAM_INTF_META_PRED_COLOR_CORRECT_GAINS:
+            return POINTER_OF_META(CAM_INTF_META_PRED_COLOR_CORRECT_GAINS, metadata);
+        case CAM_INTF_META_AEC_ROI:
+            return POINTER_OF_META(CAM_INTF_META_AEC_ROI, metadata);
+        case CAM_INTF_META_AEC_STATE:
+            return POINTER_OF_META(CAM_INTF_META_AEC_STATE, metadata);
+        case CAM_INTF_PARM_FOCUS_MODE:
+            return POINTER_OF_META(CAM_INTF_PARM_FOCUS_MODE, metadata);
+        case CAM_INTF_PARM_MANUAL_FOCUS_POS:
+            return POINTER_OF_META(CAM_INTF_PARM_MANUAL_FOCUS_POS, metadata);
+        case CAM_INTF_META_AF_ROI:
+            return POINTER_OF_META(CAM_INTF_META_AF_ROI, metadata);
+        case CAM_INTF_META_AF_STATE:
+            return POINTER_OF_META(CAM_INTF_META_AF_STATE, metadata);
+        case CAM_INTF_PARM_WHITE_BALANCE:
+            return POINTER_OF_META(CAM_INTF_PARM_WHITE_BALANCE, metadata);
+        case CAM_INTF_META_AWB_REGIONS:
+            return POINTER_OF_META(CAM_INTF_META_AWB_REGIONS, metadata);
+        case CAM_INTF_META_AWB_STATE:
+            return POINTER_OF_META(CAM_INTF_META_AWB_STATE, metadata);
+        case CAM_INTF_META_BLACK_LEVEL_LOCK:
+            return POINTER_OF_META(CAM_INTF_META_BLACK_LEVEL_LOCK, metadata);
+        case CAM_INTF_META_MODE:
+            return POINTER_OF_META(CAM_INTF_META_MODE, metadata);
+        case CAM_INTF_META_EDGE_MODE:
+            return POINTER_OF_META(CAM_INTF_META_EDGE_MODE, metadata);
+        case CAM_INTF_META_FLASH_POWER:
+            return POINTER_OF_META(CAM_INTF_META_FLASH_POWER, metadata);
+        case CAM_INTF_META_FLASH_FIRING_TIME:
+            return POINTER_OF_META(CAM_INTF_META_FLASH_FIRING_TIME, metadata);
+        case CAM_INTF_META_FLASH_MODE:
+            return POINTER_OF_META(CAM_INTF_META_FLASH_MODE, metadata);
+        case CAM_INTF_META_FLASH_STATE:
+            return POINTER_OF_META(CAM_INTF_META_FLASH_STATE, metadata);
+        case CAM_INTF_META_HOTPIXEL_MODE:
+            return POINTER_OF_META(CAM_INTF_META_HOTPIXEL_MODE, metadata);
+        case CAM_INTF_META_LENS_APERTURE:
+            return POINTER_OF_META(CAM_INTF_META_LENS_APERTURE, metadata);
+        case CAM_INTF_META_LENS_FILTERDENSITY:
+            return POINTER_OF_META(CAM_INTF_META_LENS_FILTERDENSITY, metadata);
+        case CAM_INTF_META_LENS_FOCAL_LENGTH:
+            return POINTER_OF_META(CAM_INTF_META_LENS_FOCAL_LENGTH, metadata);
+        case CAM_INTF_META_LENS_FOCUS_DISTANCE:
+            return POINTER_OF_META(CAM_INTF_META_LENS_FOCUS_DISTANCE, metadata);
+        case CAM_INTF_META_LENS_FOCUS_RANGE:
+            return POINTER_OF_META(CAM_INTF_META_LENS_FOCUS_RANGE, metadata);
+        case CAM_INTF_META_LENS_STATE:
+            return POINTER_OF_META(CAM_INTF_META_LENS_STATE, metadata);
+        case CAM_INTF_META_LENS_OPT_STAB_MODE:
+            return POINTER_OF_META(CAM_INTF_META_LENS_OPT_STAB_MODE, metadata);
+        case CAM_INTF_META_VIDEO_STAB_MODE:
+            return POINTER_OF_META(CAM_INTF_META_VIDEO_STAB_MODE, metadata);
+        case CAM_INTF_META_NOISE_REDUCTION_MODE:
+            return POINTER_OF_META(CAM_INTF_META_NOISE_REDUCTION_MODE, metadata);
+        case CAM_INTF_META_NOISE_REDUCTION_STRENGTH:
+            return POINTER_OF_META(CAM_INTF_META_NOISE_REDUCTION_STRENGTH, metadata);
+        case CAM_INTF_META_EFFECTIVE_EXPOSURE_FACTOR:
+            return POINTER_OF_META(CAM_INTF_META_EFFECTIVE_EXPOSURE_FACTOR, metadata);
+        case CAM_INTF_META_SCALER_CROP_REGION:
+            return POINTER_OF_META(CAM_INTF_META_SCALER_CROP_REGION, metadata);
+        case CAM_INTF_META_SCENE_FLICKER:
+            return POINTER_OF_META(CAM_INTF_META_SCENE_FLICKER, metadata);
+        case CAM_INTF_META_SENSOR_EXPOSURE_TIME:
+            return POINTER_OF_META(CAM_INTF_META_SENSOR_EXPOSURE_TIME, metadata);
+        case CAM_INTF_META_SENSOR_FRAME_DURATION:
+            return POINTER_OF_META(CAM_INTF_META_SENSOR_FRAME_DURATION, metadata);
+        case CAM_INTF_META_SENSOR_SENSITIVITY:
+            return POINTER_OF_META(CAM_INTF_META_SENSOR_SENSITIVITY, metadata);
+        case CAM_INTF_META_SENSOR_TIMESTAMP:
+            return POINTER_OF_META(CAM_INTF_META_SENSOR_TIMESTAMP, metadata);
+        case CAM_INTF_META_SENSOR_ROLLING_SHUTTER_SKEW:
+            return POINTER_OF_META(CAM_INTF_META_SENSOR_ROLLING_SHUTTER_SKEW, metadata);
+        case CAM_INTF_META_SHADING_MODE:
+            return POINTER_OF_META(CAM_INTF_META_SHADING_MODE, metadata);
+        case CAM_INTF_META_STATS_FACEDETECT_MODE:
+            return POINTER_OF_META(CAM_INTF_META_STATS_FACEDETECT_MODE, metadata);
+        case CAM_INTF_META_STATS_HISTOGRAM_MODE:
+            return POINTER_OF_META(CAM_INTF_META_STATS_HISTOGRAM_MODE, metadata);
+        case CAM_INTF_META_STATS_SHARPNESS_MAP_MODE:
+            return POINTER_OF_META(CAM_INTF_META_STATS_SHARPNESS_MAP_MODE, metadata);
+        case CAM_INTF_META_STATS_SHARPNESS_MAP:
+            return POINTER_OF_META(CAM_INTF_META_STATS_SHARPNESS_MAP, metadata);
+        case CAM_INTF_META_TONEMAP_CURVES:
+            return POINTER_OF_META(CAM_INTF_META_TONEMAP_CURVES, metadata);
+        case CAM_INTF_META_LENS_SHADING_MAP:
+            return POINTER_OF_META(CAM_INTF_META_LENS_SHADING_MAP, metadata);
+        case CAM_INTF_META_AEC_INFO:
+            return POINTER_OF_META(CAM_INTF_META_AEC_INFO, metadata);
+        case CAM_INTF_META_SENSOR_INFO:
+            return POINTER_OF_META(CAM_INTF_META_SENSOR_INFO, metadata);
+        case CAM_INTF_META_ASD_SCENE_CAPTURE_TYPE:
+            return POINTER_OF_META(CAM_INTF_META_ASD_SCENE_CAPTURE_TYPE, metadata);
+        case CAM_INTF_PARM_EFFECT:
+            return POINTER_OF_META(CAM_INTF_PARM_EFFECT, metadata);
+        case CAM_INTF_META_PRIVATE_DATA:
+            return POINTER_OF_META(CAM_INTF_META_PRIVATE_DATA, metadata);
+        case CAM_INTF_PARM_HAL_VERSION:
+            return POINTER_OF_META(CAM_INTF_PARM_HAL_VERSION, metadata);
+        case CAM_INTF_PARM_ANTIBANDING:
+            return POINTER_OF_META(CAM_INTF_PARM_ANTIBANDING, metadata);
+        case CAM_INTF_PARM_EXPOSURE_COMPENSATION:
+            return POINTER_OF_META(CAM_INTF_PARM_EXPOSURE_COMPENSATION, metadata);
+        case CAM_INTF_PARM_EV_STEP:
+            return POINTER_OF_META(CAM_INTF_PARM_EV_STEP, metadata);
+        case CAM_INTF_PARM_AEC_LOCK:
+            return POINTER_OF_META(CAM_INTF_PARM_AEC_LOCK, metadata);
+        case CAM_INTF_PARM_FPS_RANGE:
+            return POINTER_OF_META(CAM_INTF_PARM_FPS_RANGE, metadata);
+        case CAM_INTF_PARM_AWB_LOCK:
+            return POINTER_OF_META(CAM_INTF_PARM_AWB_LOCK, metadata);
+        case CAM_INTF_PARM_BESTSHOT_MODE:
+            return POINTER_OF_META(CAM_INTF_PARM_BESTSHOT_MODE, metadata);
+        case CAM_INTF_PARM_DIS_ENABLE:
+            return POINTER_OF_META(CAM_INTF_PARM_DIS_ENABLE, metadata);
+        case CAM_INTF_PARM_LED_MODE:
+            return POINTER_OF_META(CAM_INTF_PARM_LED_MODE, metadata);
+        case CAM_INTF_META_LED_MODE_OVERRIDE:
+            return POINTER_OF_META(CAM_INTF_META_LED_MODE_OVERRIDE, metadata);
+        case CAM_INTF_PARM_QUERY_FLASH4SNAP:
+            return POINTER_OF_META(CAM_INTF_PARM_QUERY_FLASH4SNAP, metadata);
+        case CAM_INTF_PARM_EXPOSURE:
+            return POINTER_OF_META(CAM_INTF_PARM_EXPOSURE, metadata);
+        case CAM_INTF_PARM_SHARPNESS:
+            return POINTER_OF_META(CAM_INTF_PARM_SHARPNESS, metadata);
+        case CAM_INTF_PARM_CONTRAST:
+            return POINTER_OF_META(CAM_INTF_PARM_CONTRAST, metadata);
+        case CAM_INTF_PARM_SATURATION:
+            return POINTER_OF_META(CAM_INTF_PARM_SATURATION, metadata);
+        case CAM_INTF_PARM_BRIGHTNESS:
+            return POINTER_OF_META(CAM_INTF_PARM_BRIGHTNESS, metadata);
+        case CAM_INTF_PARM_ISO:
+            return POINTER_OF_META(CAM_INTF_PARM_ISO, metadata);
+        case CAM_INTF_PARM_EXPOSURE_TIME:
+            return POINTER_OF_META(CAM_INTF_PARM_EXPOSURE_TIME, metadata);
+        case CAM_INTF_PARM_ZOOM:
+            return POINTER_OF_META(CAM_INTF_PARM_ZOOM, metadata);
+        case CAM_INTF_PARM_ROLLOFF:
+            return POINTER_OF_META(CAM_INTF_PARM_ROLLOFF, metadata);
+        case CAM_INTF_PARM_MODE:
+            return POINTER_OF_META(CAM_INTF_PARM_MODE, metadata);
+        case CAM_INTF_PARM_AEC_ALGO_TYPE:
+            return POINTER_OF_META(CAM_INTF_PARM_AEC_ALGO_TYPE, metadata);
+        case CAM_INTF_PARM_FOCUS_ALGO_TYPE:
+            return POINTER_OF_META(CAM_INTF_PARM_FOCUS_ALGO_TYPE, metadata);
+        case CAM_INTF_PARM_AEC_ROI:
+            return POINTER_OF_META(CAM_INTF_PARM_AEC_ROI, metadata);
+        case CAM_INTF_PARM_AF_ROI:
+            return POINTER_OF_META(CAM_INTF_PARM_AF_ROI, metadata);
+        case CAM_INTF_PARM_SCE_FACTOR:
+            return POINTER_OF_META(CAM_INTF_PARM_SCE_FACTOR, metadata);
+        case CAM_INTF_PARM_FD:
+            return POINTER_OF_META(CAM_INTF_PARM_FD, metadata);
+        case CAM_INTF_PARM_MCE:
+            return POINTER_OF_META(CAM_INTF_PARM_MCE, metadata);
+        case CAM_INTF_PARM_HFR:
+            return POINTER_OF_META(CAM_INTF_PARM_HFR, metadata);
+        case CAM_INTF_PARM_REDEYE_REDUCTION:
+            return POINTER_OF_META(CAM_INTF_PARM_REDEYE_REDUCTION, metadata);
+        case CAM_INTF_PARM_WAVELET_DENOISE:
+            return POINTER_OF_META(CAM_INTF_PARM_WAVELET_DENOISE, metadata);
+        case CAM_INTF_PARM_TEMPORAL_DENOISE:
+            return POINTER_OF_META(CAM_INTF_PARM_TEMPORAL_DENOISE, metadata);
+        case CAM_INTF_PARM_HISTOGRAM:
+            return POINTER_OF_META(CAM_INTF_PARM_HISTOGRAM, metadata);
+        case CAM_INTF_PARM_ASD_ENABLE:
+            return POINTER_OF_META(CAM_INTF_PARM_ASD_ENABLE, metadata);
+        case CAM_INTF_PARM_RECORDING_HINT:
+            return POINTER_OF_META(CAM_INTF_PARM_RECORDING_HINT, metadata);
+        case CAM_INTF_PARM_HDR:
+            return POINTER_OF_META(CAM_INTF_PARM_HDR, metadata);
+        case CAM_INTF_PARM_FRAMESKIP:
+            return POINTER_OF_META(CAM_INTF_PARM_FRAMESKIP, metadata);
+        case CAM_INTF_PARM_ZSL_MODE:
+            return POINTER_OF_META(CAM_INTF_PARM_ZSL_MODE, metadata);
+        case CAM_INTF_PARM_HDR_NEED_1X:
+            return POINTER_OF_META(CAM_INTF_PARM_HDR_NEED_1X, metadata);
+        case CAM_INTF_PARM_LOCK_CAF:
+            return POINTER_OF_META(CAM_INTF_PARM_LOCK_CAF, metadata);
+        case CAM_INTF_PARM_VIDEO_HDR:
+            return POINTER_OF_META(CAM_INTF_PARM_VIDEO_HDR, metadata);
+        case CAM_INTF_PARM_VT:
+            return POINTER_OF_META(CAM_INTF_PARM_VT, metadata);
+        case CAM_INTF_PARM_GET_CHROMATIX:
+            return POINTER_OF_META(CAM_INTF_PARM_GET_CHROMATIX, metadata);
+        case CAM_INTF_PARM_SET_RELOAD_CHROMATIX:
+            return POINTER_OF_META(CAM_INTF_PARM_SET_RELOAD_CHROMATIX, metadata);
+        case CAM_INTF_PARM_GET_AFTUNE:
+            return POINTER_OF_META(CAM_INTF_PARM_GET_AFTUNE, metadata);
+        case CAM_INTF_PARM_SET_RELOAD_AFTUNE:
+            return POINTER_OF_META(CAM_INTF_PARM_SET_RELOAD_AFTUNE, metadata);
+        case CAM_INTF_PARM_SET_AUTOFOCUSTUNING:
+            return POINTER_OF_META(CAM_INTF_PARM_SET_AUTOFOCUSTUNING, metadata);
+        case CAM_INTF_PARM_SET_VFE_COMMAND:
+            return POINTER_OF_META(CAM_INTF_PARM_SET_VFE_COMMAND, metadata);
+        case CAM_INTF_PARM_SET_PP_COMMAND:
+            return POINTER_OF_META(CAM_INTF_PARM_SET_PP_COMMAND, metadata);
+        case CAM_INTF_PARM_MAX_DIMENSION:
+            return POINTER_OF_META(CAM_INTF_PARM_MAX_DIMENSION, metadata);
+        case CAM_INTF_PARM_RAW_DIMENSION:
+            return POINTER_OF_META(CAM_INTF_PARM_RAW_DIMENSION, metadata);
+        case CAM_INTF_PARM_TINTLESS:
+            return POINTER_OF_META(CAM_INTF_PARM_TINTLESS, metadata);
+        case CAM_INTF_PARM_WB_MANUAL:
+            return POINTER_OF_META(CAM_INTF_PARM_WB_MANUAL, metadata);
+        case CAM_INTF_PARM_EZTUNE_CMD:
+            return POINTER_OF_META(CAM_INTF_PARM_EZTUNE_CMD, metadata);
+        case CAM_INTF_PARM_INT_EVT:
+            return POINTER_OF_META(CAM_INTF_PARM_INT_EVT, metadata);
+        case CAM_INTF_PARM_RDI_MODE:
+            return POINTER_OF_META(CAM_INTF_PARM_RDI_MODE, metadata);
+        case CAM_INTF_PARM_BURST_NUM:
+            return POINTER_OF_META(CAM_INTF_PARM_BURST_NUM, metadata);
+        case CAM_INTF_PARM_RETRO_BURST_NUM:
+            return POINTER_OF_META(CAM_INTF_PARM_RETRO_BURST_NUM, metadata);
+        case CAM_INTF_PARM_BURST_LED_ON_PERIOD:
+            return POINTER_OF_META(CAM_INTF_PARM_BURST_LED_ON_PERIOD, metadata);
+        case CAM_INTF_PARM_LONGSHOT_ENABLE:
+            return POINTER_OF_META(CAM_INTF_PARM_LONGSHOT_ENABLE, metadata);
+        case CAM_INTF_META_STREAM_INFO:
+            return POINTER_OF_META(CAM_INTF_META_STREAM_INFO, metadata);
+        case CAM_INTF_META_AEC_MODE:
+            return POINTER_OF_META(CAM_INTF_META_AEC_MODE, metadata);
+        case CAM_INTF_META_AEC_PRECAPTURE_TRIGGER:
+            return POINTER_OF_META(CAM_INTF_META_AEC_PRECAPTURE_TRIGGER, metadata);
+        case CAM_INTF_META_AF_TRIGGER:
+            return POINTER_OF_META(CAM_INTF_META_AF_TRIGGER, metadata);
+        case CAM_INTF_META_CAPTURE_INTENT:
+            return POINTER_OF_META(CAM_INTF_META_CAPTURE_INTENT, metadata);
+        case CAM_INTF_META_DEMOSAIC:
+            return POINTER_OF_META(CAM_INTF_META_DEMOSAIC, metadata);
+        case CAM_INTF_META_SHARPNESS_STRENGTH:
+            return POINTER_OF_META(CAM_INTF_META_SHARPNESS_STRENGTH, metadata);
+        case CAM_INTF_META_GEOMETRIC_MODE:
+            return POINTER_OF_META(CAM_INTF_META_GEOMETRIC_MODE, metadata);
+        case CAM_INTF_META_GEOMETRIC_STRENGTH:
+            return POINTER_OF_META(CAM_INTF_META_GEOMETRIC_STRENGTH, metadata);
+        case CAM_INTF_META_LENS_SHADING_MAP_MODE:
+            return POINTER_OF_META(CAM_INTF_META_LENS_SHADING_MAP_MODE, metadata);
+        case CAM_INTF_META_SHADING_STRENGTH:
+            return POINTER_OF_META(CAM_INTF_META_SHADING_STRENGTH, metadata);
+        case CAM_INTF_META_TONEMAP_MODE:
+            return POINTER_OF_META(CAM_INTF_META_TONEMAP_MODE, metadata);
+        case CAM_INTF_META_AWB_INFO:
+            return POINTER_OF_META(CAM_INTF_META_AWB_INFO, metadata);
+        case CAM_INTF_META_FOCUS_POSITION:
+            return POINTER_OF_META(CAM_INTF_META_FOCUS_POSITION, metadata);
+        case CAM_INTF_META_STREAM_ID:
+            return POINTER_OF_META(CAM_INTF_META_STREAM_ID, metadata);
+        case CAM_INTF_PARM_STATS_DEBUG_MASK:
+            return POINTER_OF_META(CAM_INTF_PARM_STATS_DEBUG_MASK, metadata);
+        case CAM_INTF_PARM_STATS_AF_PAAF:
+            return POINTER_OF_META(CAM_INTF_PARM_STATS_AF_PAAF, metadata);
+        case CAM_INTF_PARM_FOCUS_BRACKETING:
+            return POINTER_OF_META(CAM_INTF_PARM_FOCUS_BRACKETING, metadata);
+        case CAM_INTF_PARM_FLASH_BRACKETING:
+            return POINTER_OF_META(CAM_INTF_PARM_FLASH_BRACKETING, metadata);
+        case CAM_INTF_META_JPEG_GPS_COORDINATES:
+            return POINTER_OF_META(CAM_INTF_META_JPEG_GPS_COORDINATES, metadata);
+        case CAM_INTF_META_JPEG_GPS_PROC_METHODS:
+            return POINTER_OF_META(CAM_INTF_META_JPEG_GPS_PROC_METHODS, metadata);
+        case CAM_INTF_META_JPEG_GPS_TIMESTAMP:
+            return POINTER_OF_META(CAM_INTF_META_JPEG_GPS_TIMESTAMP, metadata);
+        case CAM_INTF_META_JPEG_ORIENTATION:
+            return POINTER_OF_META(CAM_INTF_META_JPEG_ORIENTATION, metadata);
+        case CAM_INTF_META_JPEG_QUALITY:
+            return POINTER_OF_META(CAM_INTF_META_JPEG_QUALITY, metadata);
+        case CAM_INTF_META_JPEG_THUMB_QUALITY:
+            return POINTER_OF_META(CAM_INTF_META_JPEG_THUMB_QUALITY, metadata);
+        case CAM_INTF_META_JPEG_THUMB_SIZE:
+            return POINTER_OF_META(CAM_INTF_META_JPEG_THUMB_SIZE, metadata);
+        case CAM_INTF_META_TEST_PATTERN_DATA:
+            return POINTER_OF_META(CAM_INTF_META_TEST_PATTERN_DATA, metadata);
+        case CAM_INTF_META_PROFILE_TONE_CURVE:
+            return POINTER_OF_META(CAM_INTF_META_PROFILE_TONE_CURVE, metadata);
+        case CAM_INTF_META_OTP_WB_GRGB:
+            return POINTER_OF_META(CAM_INTF_META_OTP_WB_GRGB, metadata);
+        case CAM_INTF_PARM_CAC:
+            return POINTER_OF_META(CAM_INTF_PARM_CAC, metadata);
+        case CAM_INTF_META_NEUTRAL_COL_POINT:
+            return POINTER_OF_META(CAM_INTF_META_NEUTRAL_COL_POINT, metadata);
+        case CAM_INTF_PARM_CDS_MODE:
+            return POINTER_OF_META(CAM_INTF_PARM_CDS_MODE, metadata);
+        case CAM_INTF_PARM_ROTATION:
+            return POINTER_OF_META(CAM_INTF_PARM_ROTATION, metadata);
+        case CAM_INTF_PARM_TONE_MAP_MODE:
+            return POINTER_OF_META(CAM_INTF_PARM_TONE_MAP_MODE, metadata);
+        case CAM_INTF_META_IMGLIB:
+            return POINTER_OF_META(CAM_INTF_META_IMGLIB, metadata);
+        case CAM_INTF_META_USE_AV_TIMER:
+            return POINTER_OF_META(CAM_INTF_META_USE_AV_TIMER, metadata);
+        default:
+            return NULL;
+    }
+}
+
+uint32_t get_size_of(cam_intf_parm_type_t param_id)
+{
+    metadata_buffer_t* metadata = NULL;
+    switch(param_id) {
+        case CAM_INTF_META_HISTOGRAM:
+            return SIZE_OF_PARAM(CAM_INTF_META_HISTOGRAM, metadata);
+        case CAM_INTF_META_FACE_DETECTION:
+            return SIZE_OF_PARAM(CAM_INTF_META_FACE_DETECTION, metadata);
+        case CAM_INTF_META_AUTOFOCUS_DATA:
+            return SIZE_OF_PARAM(CAM_INTF_META_AUTOFOCUS_DATA, metadata);
+        case CAM_INTF_PARM_UPDATE_DEBUG_LEVEL:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_UPDATE_DEBUG_LEVEL, metadata);
+        case CAM_INTF_META_CROP_DATA:
+            return SIZE_OF_PARAM(CAM_INTF_META_CROP_DATA, metadata);
+        case CAM_INTF_META_PREP_SNAPSHOT_DONE:
+            return SIZE_OF_PARAM(CAM_INTF_META_PREP_SNAPSHOT_DONE, metadata);
+        case CAM_INTF_META_GOOD_FRAME_IDX_RANGE:
+            return SIZE_OF_PARAM(CAM_INTF_META_GOOD_FRAME_IDX_RANGE, metadata);
+        case CAM_INTF_META_ASD_HDR_SCENE_DATA:
+            return SIZE_OF_PARAM(CAM_INTF_META_ASD_HDR_SCENE_DATA, metadata);
+        case CAM_INTF_META_ASD_SCENE_TYPE:
+            return SIZE_OF_PARAM(CAM_INTF_META_ASD_SCENE_TYPE, metadata);
+        case CAM_INTF_META_CHROMATIX_LITE_ISP:
+            return SIZE_OF_PARAM(CAM_INTF_META_CHROMATIX_LITE_ISP, metadata);
+        case CAM_INTF_META_CHROMATIX_LITE_PP:
+            return SIZE_OF_PARAM(CAM_INTF_META_CHROMATIX_LITE_PP, metadata);
+        case CAM_INTF_META_CHROMATIX_LITE_AE:
+            return SIZE_OF_PARAM(CAM_INTF_META_CHROMATIX_LITE_AE, metadata);
+        case CAM_INTF_META_CHROMATIX_LITE_AWB:
+            return SIZE_OF_PARAM(CAM_INTF_META_CHROMATIX_LITE_AWB, metadata);
+        case CAM_INTF_META_CHROMATIX_LITE_AF:
+            return SIZE_OF_PARAM(CAM_INTF_META_CHROMATIX_LITE_AF, metadata);
+        case CAM_INTF_META_CHROMATIX_LITE_ASD:
+            return SIZE_OF_PARAM(CAM_INTF_META_CHROMATIX_LITE_ASD, metadata);
+        case CAM_INTF_BUF_DIVERT_INFO:
+            return SIZE_OF_PARAM(CAM_INTF_BUF_DIVERT_INFO, metadata);
+        case CAM_INTF_META_FRAME_NUMBER_VALID:
+            return SIZE_OF_PARAM(CAM_INTF_META_FRAME_NUMBER_VALID, metadata);
+        case CAM_INTF_META_URGENT_FRAME_NUMBER_VALID:
+            return SIZE_OF_PARAM(CAM_INTF_META_URGENT_FRAME_NUMBER_VALID, metadata);
+        case CAM_INTF_META_FRAME_DROPPED:
+            return SIZE_OF_PARAM(CAM_INTF_META_FRAME_DROPPED, metadata);
+        case CAM_INTF_META_FRAME_NUMBER:
+            return SIZE_OF_PARAM(CAM_INTF_META_FRAME_NUMBER, metadata);
+        case CAM_INTF_META_URGENT_FRAME_NUMBER:
+            return SIZE_OF_PARAM(CAM_INTF_META_URGENT_FRAME_NUMBER, metadata);
+        case CAM_INTF_META_COLOR_CORRECT_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_META_COLOR_CORRECT_MODE, metadata);
+        case CAM_INTF_META_COLOR_CORRECT_TRANSFORM:
+            return SIZE_OF_PARAM(CAM_INTF_META_COLOR_CORRECT_TRANSFORM, metadata);
+        case CAM_INTF_META_COLOR_CORRECT_GAINS:
+            return SIZE_OF_PARAM(CAM_INTF_META_COLOR_CORRECT_GAINS, metadata);
+        case CAM_INTF_META_PRED_COLOR_CORRECT_TRANSFORM:
+            return SIZE_OF_PARAM(CAM_INTF_META_PRED_COLOR_CORRECT_TRANSFORM, metadata);
+        case CAM_INTF_META_PRED_COLOR_CORRECT_GAINS:
+            return SIZE_OF_PARAM(CAM_INTF_META_PRED_COLOR_CORRECT_GAINS, metadata);
+        case CAM_INTF_META_AEC_ROI:
+            return SIZE_OF_PARAM(CAM_INTF_META_AEC_ROI, metadata);
+        case CAM_INTF_META_AEC_STATE:
+            return SIZE_OF_PARAM(CAM_INTF_META_AEC_STATE, metadata);
+        case CAM_INTF_PARM_FOCUS_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_FOCUS_MODE, metadata);
+        case CAM_INTF_PARM_MANUAL_FOCUS_POS:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_MANUAL_FOCUS_POS, metadata);
+        case CAM_INTF_META_AF_ROI:
+            return SIZE_OF_PARAM(CAM_INTF_META_AF_ROI, metadata);
+        case CAM_INTF_META_AF_STATE:
+            return SIZE_OF_PARAM(CAM_INTF_META_AF_STATE, metadata);
+        case CAM_INTF_PARM_WHITE_BALANCE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_WHITE_BALANCE, metadata);
+        case CAM_INTF_META_AWB_REGIONS:
+            return SIZE_OF_PARAM(CAM_INTF_META_AWB_REGIONS, metadata);
+        case CAM_INTF_META_AWB_STATE:
+            return SIZE_OF_PARAM(CAM_INTF_META_AWB_STATE, metadata);
+        case CAM_INTF_META_BLACK_LEVEL_LOCK:
+            return SIZE_OF_PARAM(CAM_INTF_META_BLACK_LEVEL_LOCK, metadata);
+        case CAM_INTF_META_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_META_MODE, metadata);
+        case CAM_INTF_META_EDGE_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_META_EDGE_MODE, metadata);
+        case CAM_INTF_META_FLASH_POWER:
+            return SIZE_OF_PARAM(CAM_INTF_META_FLASH_POWER, metadata);
+        case CAM_INTF_META_FLASH_FIRING_TIME:
+            return SIZE_OF_PARAM(CAM_INTF_META_FLASH_FIRING_TIME, metadata);
+        case CAM_INTF_META_FLASH_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_META_FLASH_MODE, metadata);
+        case CAM_INTF_META_FLASH_STATE:
+            return SIZE_OF_PARAM(CAM_INTF_META_FLASH_STATE, metadata);
+        case CAM_INTF_META_HOTPIXEL_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_META_HOTPIXEL_MODE, metadata);
+        case CAM_INTF_META_LENS_APERTURE:
+            return SIZE_OF_PARAM(CAM_INTF_META_LENS_APERTURE, metadata);
+        case CAM_INTF_META_LENS_FILTERDENSITY:
+            return SIZE_OF_PARAM(CAM_INTF_META_LENS_FILTERDENSITY, metadata);
+        case CAM_INTF_META_LENS_FOCAL_LENGTH:
+            return SIZE_OF_PARAM(CAM_INTF_META_LENS_FOCAL_LENGTH, metadata);
+        case CAM_INTF_META_LENS_FOCUS_DISTANCE:
+            return SIZE_OF_PARAM(CAM_INTF_META_LENS_FOCUS_DISTANCE, metadata);
+        case CAM_INTF_META_LENS_FOCUS_RANGE:
+            return SIZE_OF_PARAM(CAM_INTF_META_LENS_FOCUS_RANGE, metadata);
+        case CAM_INTF_META_LENS_STATE:
+            return SIZE_OF_PARAM(CAM_INTF_META_LENS_STATE, metadata);
+        case CAM_INTF_META_LENS_OPT_STAB_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_META_LENS_OPT_STAB_MODE, metadata);
+        case CAM_INTF_META_VIDEO_STAB_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_META_VIDEO_STAB_MODE, metadata);
+        case CAM_INTF_META_NOISE_REDUCTION_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_META_NOISE_REDUCTION_MODE, metadata);
+        case CAM_INTF_META_NOISE_REDUCTION_STRENGTH:
+            return SIZE_OF_PARAM(CAM_INTF_META_NOISE_REDUCTION_STRENGTH, metadata);
+        case CAM_INTF_META_EFFECTIVE_EXPOSURE_FACTOR:
+            return SIZE_OF_PARAM(CAM_INTF_META_EFFECTIVE_EXPOSURE_FACTOR, metadata);
+        case CAM_INTF_META_SCALER_CROP_REGION:
+            return SIZE_OF_PARAM(CAM_INTF_META_SCALER_CROP_REGION, metadata);
+        case CAM_INTF_META_SCENE_FLICKER:
+            return SIZE_OF_PARAM(CAM_INTF_META_SCENE_FLICKER, metadata);
+        case CAM_INTF_META_SENSOR_EXPOSURE_TIME:
+            return SIZE_OF_PARAM(CAM_INTF_META_SENSOR_EXPOSURE_TIME, metadata);
+        case CAM_INTF_META_SENSOR_FRAME_DURATION:
+            return SIZE_OF_PARAM(CAM_INTF_META_SENSOR_FRAME_DURATION, metadata);
+        case CAM_INTF_META_SENSOR_SENSITIVITY:
+            return SIZE_OF_PARAM(CAM_INTF_META_SENSOR_SENSITIVITY, metadata);
+        case CAM_INTF_META_SENSOR_TIMESTAMP:
+            return SIZE_OF_PARAM(CAM_INTF_META_SENSOR_TIMESTAMP, metadata);
+        case CAM_INTF_META_SENSOR_ROLLING_SHUTTER_SKEW:
+            return SIZE_OF_PARAM(CAM_INTF_META_SENSOR_ROLLING_SHUTTER_SKEW, metadata);
+        case CAM_INTF_META_SHADING_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_META_SHADING_MODE, metadata);
+        case CAM_INTF_META_STATS_FACEDETECT_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_META_STATS_FACEDETECT_MODE, metadata);
+        case CAM_INTF_META_STATS_HISTOGRAM_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_META_STATS_HISTOGRAM_MODE, metadata);
+        case CAM_INTF_META_STATS_SHARPNESS_MAP_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_META_STATS_SHARPNESS_MAP_MODE, metadata);
+        case CAM_INTF_META_STATS_SHARPNESS_MAP:
+            return SIZE_OF_PARAM(CAM_INTF_META_STATS_SHARPNESS_MAP, metadata);
+        case CAM_INTF_META_TONEMAP_CURVES:
+            return SIZE_OF_PARAM(CAM_INTF_META_TONEMAP_CURVES, metadata);
+        case CAM_INTF_META_LENS_SHADING_MAP:
+            return SIZE_OF_PARAM(CAM_INTF_META_LENS_SHADING_MAP, metadata);
+        case CAM_INTF_META_AEC_INFO:
+            return SIZE_OF_PARAM(CAM_INTF_META_AEC_INFO, metadata);
+        case CAM_INTF_META_SENSOR_INFO:
+            return SIZE_OF_PARAM(CAM_INTF_META_SENSOR_INFO, metadata);
+        case CAM_INTF_META_ASD_SCENE_CAPTURE_TYPE:
+            return SIZE_OF_PARAM(CAM_INTF_META_ASD_SCENE_CAPTURE_TYPE, metadata);
+        case CAM_INTF_PARM_EFFECT:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_EFFECT, metadata);
+        case CAM_INTF_META_PRIVATE_DATA:
+            return SIZE_OF_PARAM(CAM_INTF_META_PRIVATE_DATA, metadata);
+        case CAM_INTF_PARM_HAL_VERSION:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_HAL_VERSION, metadata);
+        case CAM_INTF_PARM_ANTIBANDING:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_ANTIBANDING, metadata);
+        case CAM_INTF_PARM_EXPOSURE_COMPENSATION:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_EXPOSURE_COMPENSATION, metadata);
+        case CAM_INTF_PARM_EV_STEP:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_EV_STEP, metadata);
+        case CAM_INTF_PARM_AEC_LOCK:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_AEC_LOCK, metadata);
+        case CAM_INTF_PARM_FPS_RANGE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_FPS_RANGE, metadata);
+        case CAM_INTF_PARM_AWB_LOCK:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_AWB_LOCK, metadata);
+        case CAM_INTF_PARM_BESTSHOT_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_BESTSHOT_MODE, metadata);
+        case CAM_INTF_PARM_DIS_ENABLE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_DIS_ENABLE, metadata);
+        case CAM_INTF_PARM_LED_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_LED_MODE, metadata);
+        case CAM_INTF_META_LED_MODE_OVERRIDE:
+            return SIZE_OF_PARAM(CAM_INTF_META_LED_MODE_OVERRIDE, metadata);
+        case CAM_INTF_PARM_QUERY_FLASH4SNAP:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_QUERY_FLASH4SNAP, metadata);
+        case CAM_INTF_PARM_EXPOSURE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_EXPOSURE, metadata);
+        case CAM_INTF_PARM_SHARPNESS:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_SHARPNESS, metadata);
+        case CAM_INTF_PARM_CONTRAST:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_CONTRAST, metadata);
+        case CAM_INTF_PARM_SATURATION:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_SATURATION, metadata);
+        case CAM_INTF_PARM_BRIGHTNESS:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_BRIGHTNESS, metadata);
+        case CAM_INTF_PARM_ISO:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_ISO, metadata);
+        case CAM_INTF_PARM_EXPOSURE_TIME:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_EXPOSURE_TIME, metadata);
+        case CAM_INTF_PARM_ZOOM:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_ZOOM, metadata);
+        case CAM_INTF_PARM_ROLLOFF:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_ROLLOFF, metadata);
+        case CAM_INTF_PARM_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_MODE, metadata);
+        case CAM_INTF_PARM_AEC_ALGO_TYPE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_AEC_ALGO_TYPE, metadata);
+        case CAM_INTF_PARM_FOCUS_ALGO_TYPE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_FOCUS_ALGO_TYPE, metadata);
+        case CAM_INTF_PARM_AEC_ROI:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_AEC_ROI, metadata);
+        case CAM_INTF_PARM_AF_ROI:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_AF_ROI, metadata);
+        case CAM_INTF_PARM_SCE_FACTOR:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_SCE_FACTOR, metadata);
+        case CAM_INTF_PARM_FD:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_FD, metadata);
+        case CAM_INTF_PARM_MCE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_MCE, metadata);
+        case CAM_INTF_PARM_HFR:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_HFR, metadata);
+        case CAM_INTF_PARM_REDEYE_REDUCTION:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_REDEYE_REDUCTION, metadata);
+        case CAM_INTF_PARM_WAVELET_DENOISE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_WAVELET_DENOISE, metadata);
+        case CAM_INTF_PARM_TEMPORAL_DENOISE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_TEMPORAL_DENOISE, metadata);
+        case CAM_INTF_PARM_HISTOGRAM:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_HISTOGRAM, metadata);
+        case CAM_INTF_PARM_ASD_ENABLE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_ASD_ENABLE, metadata);
+        case CAM_INTF_PARM_RECORDING_HINT:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_RECORDING_HINT, metadata);
+        case CAM_INTF_PARM_HDR:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_HDR, metadata);
+        case CAM_INTF_PARM_FRAMESKIP:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_FRAMESKIP, metadata);
+        case CAM_INTF_PARM_ZSL_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_ZSL_MODE, metadata);
+        case CAM_INTF_PARM_HDR_NEED_1X:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_HDR_NEED_1X, metadata);
+        case CAM_INTF_PARM_LOCK_CAF:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_LOCK_CAF, metadata);
+        case CAM_INTF_PARM_VIDEO_HDR:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_VIDEO_HDR, metadata);
+        case CAM_INTF_PARM_VT:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_VT, metadata);
+        case CAM_INTF_PARM_GET_CHROMATIX:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_GET_CHROMATIX, metadata);
+        case CAM_INTF_PARM_SET_RELOAD_CHROMATIX:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_SET_RELOAD_CHROMATIX, metadata);
+        case CAM_INTF_PARM_GET_AFTUNE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_GET_AFTUNE, metadata);
+        case CAM_INTF_PARM_SET_RELOAD_AFTUNE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_SET_RELOAD_AFTUNE, metadata);
+        case CAM_INTF_PARM_SET_AUTOFOCUSTUNING:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_SET_AUTOFOCUSTUNING, metadata);
+        case CAM_INTF_PARM_SET_VFE_COMMAND:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_SET_VFE_COMMAND, metadata);
+        case CAM_INTF_PARM_SET_PP_COMMAND:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_SET_PP_COMMAND, metadata);
+        case CAM_INTF_PARM_MAX_DIMENSION:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_MAX_DIMENSION, metadata);
+        case CAM_INTF_PARM_RAW_DIMENSION:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_RAW_DIMENSION, metadata);
+        case CAM_INTF_PARM_TINTLESS:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_TINTLESS, metadata);
+        case CAM_INTF_PARM_WB_MANUAL:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_WB_MANUAL, metadata);
+        case CAM_INTF_PARM_EZTUNE_CMD:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_EZTUNE_CMD, metadata);
+        case CAM_INTF_PARM_INT_EVT:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_INT_EVT, metadata);
+        case CAM_INTF_PARM_RDI_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_RDI_MODE, metadata);
+        case CAM_INTF_PARM_BURST_NUM:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_BURST_NUM, metadata);
+        case CAM_INTF_PARM_RETRO_BURST_NUM:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_RETRO_BURST_NUM, metadata);
+        case CAM_INTF_PARM_BURST_LED_ON_PERIOD:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_BURST_LED_ON_PERIOD, metadata);
+        case CAM_INTF_PARM_LONGSHOT_ENABLE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_LONGSHOT_ENABLE, metadata);
+        case CAM_INTF_META_STREAM_INFO:
+            return SIZE_OF_PARAM(CAM_INTF_META_STREAM_INFO, metadata);
+        case CAM_INTF_META_AEC_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_META_AEC_MODE, metadata);
+        case CAM_INTF_META_AEC_PRECAPTURE_TRIGGER:
+            return SIZE_OF_PARAM(CAM_INTF_META_AEC_PRECAPTURE_TRIGGER, metadata);
+        case CAM_INTF_META_AF_TRIGGER:
+            return SIZE_OF_PARAM(CAM_INTF_META_AF_TRIGGER, metadata);
+        case CAM_INTF_META_CAPTURE_INTENT:
+            return SIZE_OF_PARAM(CAM_INTF_META_CAPTURE_INTENT, metadata);
+        case CAM_INTF_META_DEMOSAIC:
+            return SIZE_OF_PARAM(CAM_INTF_META_DEMOSAIC, metadata);
+        case CAM_INTF_META_SHARPNESS_STRENGTH:
+            return SIZE_OF_PARAM(CAM_INTF_META_SHARPNESS_STRENGTH, metadata);
+        case CAM_INTF_META_GEOMETRIC_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_META_GEOMETRIC_MODE, metadata);
+        case CAM_INTF_META_GEOMETRIC_STRENGTH:
+            return SIZE_OF_PARAM(CAM_INTF_META_GEOMETRIC_STRENGTH, metadata);
+        case CAM_INTF_META_LENS_SHADING_MAP_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_META_LENS_SHADING_MAP_MODE, metadata);
+        case CAM_INTF_META_SHADING_STRENGTH:
+            return SIZE_OF_PARAM(CAM_INTF_META_SHADING_STRENGTH, metadata);
+        case CAM_INTF_META_TONEMAP_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_META_TONEMAP_MODE, metadata);
+        case CAM_INTF_META_AWB_INFO:
+            return SIZE_OF_PARAM(CAM_INTF_META_AWB_INFO, metadata);
+        case CAM_INTF_META_FOCUS_POSITION:
+            return SIZE_OF_PARAM(CAM_INTF_META_FOCUS_POSITION, metadata);
+        case CAM_INTF_META_STREAM_ID:
+            return SIZE_OF_PARAM(CAM_INTF_META_STREAM_ID, metadata);
+        case CAM_INTF_PARM_STATS_DEBUG_MASK:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_STATS_DEBUG_MASK, metadata);
+        case CAM_INTF_PARM_STATS_AF_PAAF:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_STATS_AF_PAAF, metadata);
+        case CAM_INTF_PARM_FOCUS_BRACKETING:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_FOCUS_BRACKETING, metadata);
+        case CAM_INTF_PARM_FLASH_BRACKETING:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_FLASH_BRACKETING, metadata);
+        case CAM_INTF_META_JPEG_GPS_COORDINATES:
+            return SIZE_OF_PARAM(CAM_INTF_META_JPEG_GPS_COORDINATES, metadata);
+        case CAM_INTF_META_JPEG_GPS_PROC_METHODS:
+            return SIZE_OF_PARAM(CAM_INTF_META_JPEG_GPS_PROC_METHODS, metadata);
+        case CAM_INTF_META_JPEG_GPS_TIMESTAMP:
+            return SIZE_OF_PARAM(CAM_INTF_META_JPEG_GPS_TIMESTAMP, metadata);
+        case CAM_INTF_META_JPEG_ORIENTATION:
+            return SIZE_OF_PARAM(CAM_INTF_META_JPEG_ORIENTATION, metadata);
+        case CAM_INTF_META_JPEG_QUALITY:
+            return SIZE_OF_PARAM(CAM_INTF_META_JPEG_QUALITY, metadata);
+        case CAM_INTF_META_JPEG_THUMB_QUALITY:
+            return SIZE_OF_PARAM(CAM_INTF_META_JPEG_THUMB_QUALITY, metadata);
+        case CAM_INTF_META_JPEG_THUMB_SIZE:
+            return SIZE_OF_PARAM(CAM_INTF_META_JPEG_THUMB_SIZE, metadata);
+        case CAM_INTF_META_TEST_PATTERN_DATA:
+            return SIZE_OF_PARAM(CAM_INTF_META_TEST_PATTERN_DATA, metadata);
+        case CAM_INTF_META_PROFILE_TONE_CURVE:
+            return SIZE_OF_PARAM(CAM_INTF_META_PROFILE_TONE_CURVE, metadata);
+        case CAM_INTF_META_OTP_WB_GRGB:
+            return SIZE_OF_PARAM(CAM_INTF_META_OTP_WB_GRGB, metadata);
+        case CAM_INTF_PARM_CAC:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_CAC, metadata);
+        case CAM_INTF_META_NEUTRAL_COL_POINT:
+            return SIZE_OF_PARAM(CAM_INTF_META_NEUTRAL_COL_POINT, metadata);
+        case CAM_INTF_PARM_CDS_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_CDS_MODE, metadata);
+        case CAM_INTF_PARM_ROTATION:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_ROTATION, metadata);
+        case CAM_INTF_PARM_TONE_MAP_MODE:
+            return SIZE_OF_PARAM(CAM_INTF_PARM_TONE_MAP_MODE, metadata);
+        case CAM_INTF_META_IMGLIB:
+            return SIZE_OF_PARAM(CAM_INTF_META_IMGLIB, metadata);
+        case CAM_INTF_META_USE_AV_TIMER:
+            return SIZE_OF_PARAM(CAM_INTF_META_USE_AV_TIMER, metadata);
+        default:
+            return 0;
+    }
+    return 0;
+}
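+
+/*
+ * Illustrative usage sketch (added for clarity, not part of the original
+ * change; the lookup helper name is assumed from the switch above): callers
+ * typically pair the pointer lookup with get_size_of() when copying a
+ * parameter out of a metadata buffer:
+ *
+ *     void *src = get_pointer_of(CAM_INTF_PARM_FPS_RANGE, metadata);
+ *     if (src != NULL) {
+ *         memcpy(&fps_range, src, get_size_of(CAM_INTF_PARM_FPS_RANGE));
+ *     }
+ */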
diff --git a/camera/QCamera2/stack/mm-camera-interface/src/mm_camera.c b/camera/QCamera2/stack/mm-camera-interface/src/mm_camera.c
new file mode 100644
index 0000000..1cf357f
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-interface/src/mm_camera.c
@@ -0,0 +1,1971 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <pthread.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <cutils/properties.h>
+#include <stdlib.h>
+
+#include <cam_semaphore.h>
+
+#include "mm_camera_dbg.h"
+#include "mm_camera_sock.h"
+#include "mm_camera_interface.h"
+#include "mm_camera.h"
+
+#define SET_PARM_BIT32(parm, parm_arr) \
+    (parm_arr[parm/32] |= (1<<(parm%32)))
+
+#define GET_PARM_BIT32(parm, parm_arr) \
+    ((parm_arr[parm/32]>>(parm%32))& 0x1)
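+
+/*
+ * Worked example (illustrative addition, not from the original change):
+ * SET_PARM_BIT32 / GET_PARM_BIT32 pack one flag per parameter into an array
+ * of 32-bit words. For a hypothetical parameter id of 40 (40/32 = 1, 40%32 = 8):
+ *     SET_PARM_BIT32(40, arr)  ->  arr[1] |= (1 << 8)
+ *     GET_PARM_BIT32(40, arr)  ->  (arr[1] >> 8) & 0x1
+ */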
+
+#define WAIT_TIMEOUT 3
+
+/* internal function declare */
+int32_t mm_camera_evt_sub(mm_camera_obj_t * my_obj,
+                          uint8_t reg_flag);
+int32_t mm_camera_enqueue_evt(mm_camera_obj_t *my_obj,
+                              mm_camera_event_t *event);
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_util_get_channel_by_handler
+ *
+ * DESCRIPTION: utility function to get a channel object from its handle
+ *
+ * PARAMETERS :
+ *   @cam_obj: ptr to a camera object
+ *   @handler: channel handle
+ *
+ * RETURN     : ptr to a channel object.
+ *              NULL if failed.
+ *==========================================================================*/
+mm_channel_t * mm_camera_util_get_channel_by_handler(
+                                    mm_camera_obj_t * cam_obj,
+                                    uint32_t handler)
+{
+    int i;
+    mm_channel_t *ch_obj = NULL;
+    for(i = 0; i < MM_CAMERA_CHANNEL_MAX; i++) {
+        if (handler == cam_obj->ch[i].my_hdl) {
+            ch_obj = &cam_obj->ch[i];
+            break;
+        }
+    }
+    return ch_obj;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_util_chip_is_a_family
+ *
+ * DESCRIPTION: utility function to check if the host is A family chip
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : TRUE if A family.
+ *              FALSE otherwise.
+ *==========================================================================*/
+uint8_t mm_camera_util_chip_is_a_family(void)
+{
+#ifdef USE_A_FAMILY
+    return TRUE;
+#else
+    return FALSE;
+#endif
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_dispatch_app_event
+ *
+ * DESCRIPTION: dispatch event to apps that registered for event notify
+ *
+ * PARAMETERS :
+ *   @cmd_cb: ptr to a struct storing event info
+ *   @user_data: user data ptr (camera object)
+ *
+ * RETURN     : none
+ *==========================================================================*/
+static void mm_camera_dispatch_app_event(mm_camera_cmdcb_t *cmd_cb,
+                                         void* user_data)
+{
+    mm_camera_cmd_thread_name("mm_cam_event");
+    int i;
+    mm_camera_event_t *event = &cmd_cb->u.evt;
+    mm_camera_obj_t * my_obj = (mm_camera_obj_t *)user_data;
+    if (NULL != my_obj) {
+        pthread_mutex_lock(&my_obj->cb_lock);
+        for(i = 0; i < MM_CAMERA_EVT_ENTRY_MAX; i++) {
+            if(my_obj->evt.evt[i].evt_cb) {
+                my_obj->evt.evt[i].evt_cb(
+                    my_obj->my_hdl,
+                    event,
+                    my_obj->evt.evt[i].user_data);
+            }
+        }
+        pthread_mutex_unlock(&my_obj->cb_lock);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_event_notify
+ *
+ * DESCRIPTION: callback to handle event notify from kernel. This call will
+ *              dequeue event from kernel.
+ *
+ * PARAMETERS :
+ *   @user_data: user data ptr (camera object)
+ *
+ * RETURN     : none
+ *==========================================================================*/
+static void mm_camera_event_notify(void* user_data)
+{
+    struct v4l2_event ev;
+    struct msm_v4l2_event_data *msm_evt = NULL;
+    int rc;
+    mm_camera_event_t evt;
+    memset(&evt, 0, sizeof(mm_camera_event_t));
+
+    mm_camera_obj_t *my_obj = (mm_camera_obj_t*)user_data;
+    if (NULL != my_obj) {
+        /* read evt */
+        memset(&ev, 0, sizeof(ev));
+        rc = ioctl(my_obj->ctrl_fd, VIDIOC_DQEVENT, &ev);
+
+        if (rc >= 0 && ev.id == MSM_CAMERA_MSM_NOTIFY) {
+            msm_evt = (struct msm_v4l2_event_data *)ev.u.data;
+            switch (msm_evt->command) {
+            case CAM_EVENT_TYPE_DAEMON_PULL_REQ:
+                evt.server_event_type = CAM_EVENT_TYPE_DAEMON_PULL_REQ;
+                mm_camera_enqueue_evt(my_obj, &evt);
+                break;
+            case CAM_EVENT_TYPE_MAP_UNMAP_DONE:
+                pthread_mutex_lock(&my_obj->evt_lock);
+                my_obj->evt_rcvd.server_event_type = msm_evt->command;
+                my_obj->evt_rcvd.status = msm_evt->status;
+                pthread_cond_signal(&my_obj->evt_cond);
+                pthread_mutex_unlock(&my_obj->evt_lock);
+                break;
+            case CAM_EVENT_TYPE_INT_TAKE_JPEG:
+            case CAM_EVENT_TYPE_INT_TAKE_RAW:
+                {
+                    evt.server_event_type = msm_evt->command;
+                    mm_camera_enqueue_evt(my_obj, &evt);
+                }
+                break;
+            case MSM_CAMERA_PRIV_SHUTDOWN:
+                {
+                    evt.server_event_type = CAM_EVENT_TYPE_DAEMON_DIED;
+                    mm_camera_enqueue_evt(my_obj, &evt);
+                }
+                break;
+            default:
+                break;
+            }
+        }
+    }
+}
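+
+/*
+ * Illustrative note (added for clarity; the event type constant below is an
+ * assumption based on the msm camera kernel headers, not part of this change):
+ * VIDIOC_DQEVENT above only returns events that were previously subscribed to
+ * by mm_camera_evt_sub(), conceptually:
+ *
+ *     struct v4l2_event_subscription sub;
+ *     memset(&sub, 0, sizeof(sub));
+ *     sub.type = MSM_CAMERA_V4L2_EVENT_TYPE;
+ *     sub.id   = MSM_CAMERA_MSM_NOTIFY;
+ *     ioctl(my_obj->ctrl_fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
+ */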
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_enqueue_evt
+ *
+ * DESCRIPTION: enqueue received event into event queue to be processed by
+ *              event thread.
+ *
+ * PARAMETERS :
+ *   @my_obj   : ptr to a camera object
+ *   @event    : event to be queued
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_enqueue_evt(mm_camera_obj_t *my_obj,
+                              mm_camera_event_t *event)
+{
+    int32_t rc = 0;
+    mm_camera_cmdcb_t *node = NULL;
+
+    node = (mm_camera_cmdcb_t *)malloc(sizeof(mm_camera_cmdcb_t));
+    if (NULL != node) {
+        memset(node, 0, sizeof(mm_camera_cmdcb_t));
+        node->cmd_type = MM_CAMERA_CMD_TYPE_EVT_CB;
+        node->u.evt = *event;
+
+        /* enqueue to evt cmd thread */
+        cam_queue_enq(&(my_obj->evt_thread.cmd_queue), node);
+        /* wake up evt cmd thread */
+        cam_sem_post(&(my_obj->evt_thread.cmd_sem));
+    } else {
+        CDBG_ERROR("%s: No memory for mm_camera_node_t", __func__);
+        rc = -1;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_open
+ *
+ * DESCRIPTION: open a camera
+ *
+ * PARAMETERS :
+ *   @my_obj   : ptr to a camera object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_open(mm_camera_obj_t *my_obj)
+{
+    char dev_name[MM_CAMERA_DEV_NAME_LEN];
+    int32_t rc = 0;
+    int8_t n_try=MM_CAMERA_DEV_OPEN_TRIES;
+    uint8_t sleep_msec=MM_CAMERA_DEV_OPEN_RETRY_SLEEP;
+    int cam_idx = 0;
+    const char *dev_name_value = NULL;
+    char prop[PROPERTY_VALUE_MAX];
+    uint32_t globalLogLevel = 0;
+
+    property_get("persist.camera.hal.debug", prop, "0");
+    int val = atoi(prop);
+    if (0 <= val) {
+        gMmCameraIntfLogLevel = (uint32_t)val;
+    }
+    property_get("persist.camera.global.debug", prop, "0");
+    val = atoi(prop);
+    if (0 <= val) {
+        globalLogLevel = (uint32_t)val;
+    }
+
+    /* The highest log level among hal.debug and global.debug is selected */
+    if (gMmCameraIntfLogLevel < globalLogLevel)
+        gMmCameraIntfLogLevel = globalLogLevel;
+
+    CDBG("%s:  begin\n", __func__);
+
+    if (NULL == my_obj) {
+        goto on_error;
+    }
+    dev_name_value = mm_camera_util_get_dev_name(my_obj->my_hdl);
+    if (NULL == dev_name_value) {
+        goto on_error;
+    }
+    snprintf(dev_name, sizeof(dev_name), "/dev/%s",
+             dev_name_value);
+    sscanf(dev_name, "/dev/video%d", &cam_idx);
+    CDBG("%s: dev name = %s, cam_idx = %d", __func__, dev_name, cam_idx);
+
+    do{
+        n_try--;
+        errno = 0;
+        my_obj->ctrl_fd = open(dev_name, O_RDWR | O_NONBLOCK);
+        CDBG("%s:  ctrl_fd = %d, errno == %d", __func__, my_obj->ctrl_fd, errno);
+        if((my_obj->ctrl_fd >= 0) ||
+                (errno != EIO && errno != ETIMEDOUT && errno != ENODEV) ||
+                (n_try <= 0 )) {
+            CDBG_HIGH("%s:  opened, break out while loop", __func__);
+            if (my_obj->ctrl_fd < 0) {
+                    ALOGE("%s: Failed to open %s: %s(%d).", __func__, dev_name,
+                            strerror(errno), errno);
+            }
+            break;
+        }
+        ALOGE("%s:Failed with %s error, retrying after %d milli-seconds",
+             __func__, strerror(errno), sleep_msec);
+        usleep(sleep_msec * 1000U);
+    }while (n_try > 0);
+
+    if (my_obj->ctrl_fd < 0) {
+        CDBG_ERROR("%s: cannot open control fd of '%s' (%s)\n",
+                 __func__, dev_name, strerror(errno));
+        if (errno == EBUSY)
+            rc = -EUSERS;
+        else
+            rc = -1;
+        goto on_error;
+    }
+
+    /* open domain socket*/
+    n_try = MM_CAMERA_DEV_OPEN_TRIES;
+    do {
+        n_try--;
+        my_obj->ds_fd = mm_camera_socket_create(cam_idx, MM_CAMERA_SOCK_TYPE_UDP);
+        CDBG("%s:  ds_fd = %d, errno = %d", __func__, my_obj->ds_fd, errno);
+        if((my_obj->ds_fd >= 0) || (n_try <= 0 )) {
+            CDBG("%s:  opened, break out while loop", __func__);
+            break;
+        }
+        CDBG("%s:failed with I/O error retrying after %d milli-seconds",
+             __func__, sleep_msec);
+        usleep(sleep_msec * 1000U);
+    } while (n_try > 0);
+
+    if (my_obj->ds_fd < 0) {
+        CDBG_ERROR("%s: cannot open domain socket fd of '%s'(%s)\n",
+                 __func__, dev_name, strerror(errno));
+        rc = -1;
+        goto on_error;
+    }
+    pthread_mutex_init(&my_obj->msg_lock, NULL);
+
+    pthread_mutex_init(&my_obj->cb_lock, NULL);
+    pthread_mutex_init(&my_obj->evt_lock, NULL);
+    pthread_cond_init(&my_obj->evt_cond, NULL);
+
+    CDBG("%s : Launch evt Thread in Cam Open",__func__);
+    snprintf(my_obj->evt_thread.threadName, THREAD_NAME_SIZE, "CAM_Dispatch");
+    mm_camera_cmd_thread_launch(&my_obj->evt_thread,
+                                mm_camera_dispatch_app_event,
+                                (void *)my_obj);
+
+    /* launch event poll thread;
+     * the evt fd is added to the poll thread when the user first registers for events */
+    CDBG("%s : Launch evt Poll Thread in Cam Open", __func__);
+    snprintf(my_obj->evt_poll_thread.threadName, THREAD_NAME_SIZE, "CAM_Poll");
+    mm_camera_poll_thread_launch(&my_obj->evt_poll_thread,
+                                 MM_CAMERA_POLL_TYPE_EVT);
+    mm_camera_evt_sub(my_obj, TRUE);
+
+    /* unlock cam_lock; the global intf_lock needs to be released in camera_open()
+     * so as not to block the other camera's operation in the dual camera use case. */
+    pthread_mutex_unlock(&my_obj->cam_lock);
+    CDBG("%s:  end (rc = %d)\n", __func__, rc);
+    return rc;
+
+on_error:
+
+    if (NULL == dev_name_value) {
+        CDBG_ERROR("%s: Invalid device name\n", __func__);
+        rc = -1;
+    }
+
+    if (NULL == my_obj) {
+        CDBG_ERROR("%s: Invalid camera object\n", __func__);
+        rc = -1;
+    } else {
+        if (my_obj->ctrl_fd >= 0) {
+            close(my_obj->ctrl_fd);
+            my_obj->ctrl_fd = -1;
+        }
+        if (my_obj->ds_fd >= 0) {
+            mm_camera_socket_close(my_obj->ds_fd);
+            my_obj->ds_fd = -1;
+        }
+    }
+
+    /* unlock cam_lock; the global intf_lock needs to be released in camera_open()
+     * so as not to block the other camera's operation in the dual camera use case. */
+    pthread_mutex_unlock(&my_obj->cam_lock);
+    return rc;
+}
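+
+/*
+ * Note (added for clarity, summarizing the function above; the description of
+ * the socket's role is an assumption about this interface, not stated in the
+ * change itself): mm_camera_open() establishes two descriptors per camera:
+ * ctrl_fd, the /dev/video%d node used for ioctl control and event dequeue, and
+ * ds_fd, a domain socket from mm_camera_socket_create() used to exchange
+ * buffer/parameter mapping messages with the camera daemon. Both opens are
+ * retried up to MM_CAMERA_DEV_OPEN_TRIES times, sleeping
+ * MM_CAMERA_DEV_OPEN_RETRY_SLEEP milliseconds between attempts.
+ */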
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_close
+ *
+ * DESCRIPTION: enqueue received event into event queue to be processed by
+ *              event thread.
+ *
+ * PARAMETERS :
+ *   @my_obj   : ptr to a camera object
+ *   @event    : event to be queued
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_close(mm_camera_obj_t *my_obj)
+{
+    CDBG("%s : unsubscribe evt", __func__);
+    mm_camera_evt_sub(my_obj, FALSE);
+
+    CDBG("%s : Close evt Poll Thread in Cam Close",__func__);
+    mm_camera_poll_thread_release(&my_obj->evt_poll_thread);
+
+    CDBG("%s : Close evt cmd Thread in Cam Close",__func__);
+    mm_camera_cmd_thread_release(&my_obj->evt_thread);
+
+    if(my_obj->ctrl_fd >= 0) {
+        close(my_obj->ctrl_fd);
+        my_obj->ctrl_fd = -1;
+    }
+    if(my_obj->ds_fd >= 0) {
+        mm_camera_socket_close(my_obj->ds_fd);
+        my_obj->ds_fd = -1;
+    }
+    pthread_mutex_destroy(&my_obj->msg_lock);
+
+    pthread_mutex_destroy(&my_obj->cb_lock);
+    pthread_mutex_destroy(&my_obj->evt_lock);
+    pthread_cond_destroy(&my_obj->evt_cond);
+
+    pthread_mutex_unlock(&my_obj->cam_lock);
+    return 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_close_fd
+ *
+ * DESCRIPTION: close the ctrl_fd and domain socket fd in case of an error, so
+ *              that the backend can shut down.
+ *              Do NOT close or release any other HAL resources, since
+ *              close_camera has not been called yet.
+ * PARAMETERS :
+ *   @my_obj   : ptr to a camera object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_close_fd(mm_camera_obj_t *my_obj)
+{
+    if(my_obj->ctrl_fd >= 0) {
+        close(my_obj->ctrl_fd);
+        my_obj->ctrl_fd = -1;
+    }
+    if(my_obj->ds_fd >= 0) {
+        mm_camera_socket_close(my_obj->ds_fd);
+        my_obj->ds_fd = -1;
+    }
+    pthread_mutex_unlock(&my_obj->cam_lock);
+    return 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_register_event_notify_internal
+ *
+ * DESCRIPTION: internal implementation for registering callback for event notify.
+ *
+ * PARAMETERS :
+ *   @my_obj   : ptr to a camera object
+ *   @evt_cb   : callback to be registered to handle event notify
+ *   @user_data: user data ptr
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_register_event_notify_internal(mm_camera_obj_t *my_obj,
+                                                 mm_camera_event_notify_t evt_cb,
+                                                 void * user_data)
+{
+    int i;
+    int rc = -1;
+    mm_camera_evt_obj_t *evt_array = NULL;
+
+    pthread_mutex_lock(&my_obj->cb_lock);
+    evt_array = &my_obj->evt;
+    if(evt_cb) {
+        /* this is reg case */
+        for(i = 0; i < MM_CAMERA_EVT_ENTRY_MAX; i++) {
+            if(evt_array->evt[i].user_data == NULL) {
+                evt_array->evt[i].evt_cb = evt_cb;
+                evt_array->evt[i].user_data = user_data;
+                evt_array->reg_count++;
+                rc = 0;
+                break;
+            }
+        }
+    } else {
+        /* this is unreg case */
+        for(i = 0; i < MM_CAMERA_EVT_ENTRY_MAX; i++) {
+            if(evt_array->evt[i].user_data == user_data) {
+                evt_array->evt[i].evt_cb = NULL;
+                evt_array->evt[i].user_data = NULL;
+                evt_array->reg_count--;
+                rc = 0;
+                break;
+            }
+        }
+    }
+
+    pthread_mutex_unlock(&my_obj->cb_lock);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_register_event_notify
+ *
+ * DESCRIPTION: registering a callback for event notify.
+ *
+ * PARAMETERS :
+ *   @my_obj   : ptr to a camera object
+ *   @evt_cb   : callback to be registered to handle event notify
+ *   @user_data: user data ptr
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_register_event_notify(mm_camera_obj_t *my_obj,
+                                        mm_camera_event_notify_t evt_cb,
+                                        void * user_data)
+{
+    int rc = -1;
+    rc = mm_camera_register_event_notify_internal(my_obj,
+                                                  evt_cb,
+                                                  user_data);
+    pthread_mutex_unlock(&my_obj->cam_lock);
+    return rc;
+}
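+
+/*
+ * Illustrative sketch (added for clarity; the callback prototype and names are
+ * assumptions based on mm_camera_event_notify_t, not part of the original
+ * change): a typical event callback and its registration would look roughly
+ * like:
+ *
+ *     static void my_evt_cb(uint32_t camera_handle,
+ *                           mm_camera_event_t *evt, void *user_data)
+ *     {
+ *         // e.g. react to evt->server_event_type == CAM_EVENT_TYPE_DAEMON_DIED
+ *     }
+ *
+ *     mm_camera_register_event_notify(my_obj, my_evt_cb, my_user_data);
+ *
+ * Registered callbacks are invoked from mm_camera_dispatch_app_event().
+ */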
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_qbuf
+ *
+ * DESCRIPTION: enqueue buffer back to kernel
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *   @buf          : buf ptr to be enqueued
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_qbuf(mm_camera_obj_t *my_obj,
+                       uint32_t ch_id,
+                       mm_camera_buf_def_t *buf)
+{
+    int rc = -1;
+    mm_channel_t * ch_obj = NULL;
+    ch_obj = mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+
+    pthread_mutex_unlock(&my_obj->cam_lock);
+
+    /* qbuf is assumed to happen before the channel/stream is fully stopped,
+     * because qbuf is done within the dataCB context;
+     * to avoid deadlock, ch_lock is not taken for qbuf */
+    if (NULL != ch_obj) {
+        rc = mm_channel_qbuf(ch_obj, buf);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_get_queued_buf_count
+ *
+ * DESCRIPTION: return queued buffer count
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *   @stream_id : stream id
+ *
+ * RETURN     : queued buffer count
+ *==========================================================================*/
+int32_t mm_camera_get_queued_buf_count(mm_camera_obj_t *my_obj,
+        uint32_t ch_id, uint32_t stream_id)
+{
+    int rc = -1;
+    mm_channel_t * ch_obj = NULL;
+    uint32_t payload;
+    ch_obj = mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+    payload = stream_id;
+
+    if (NULL != ch_obj) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+        rc = mm_channel_fsm_fn(ch_obj,
+                MM_CHANNEL_EVT_GET_STREAM_QUEUED_BUF_COUNT,
+                (void *)&payload,
+                NULL);
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+
+    return rc;
+}
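+
+/*
+ * Note on locking (added for clarity, describing the pattern visible in the
+ * functions above and below): my_obj->cam_lock is held on entry to these
+ * per-object functions and each of them releases it before returning. When a
+ * channel is involved, ch_lock is acquired first and cam_lock is released
+ * immediately afterwards, e.g.:
+ *
+ *     pthread_mutex_lock(&ch_obj->ch_lock);
+ *     pthread_mutex_unlock(&my_obj->cam_lock);
+ *     rc = mm_channel_fsm_fn(ch_obj, ...);
+ */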
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_query_capability
+ *
+ * DESCRIPTION: query camera capability
+ *
+ * PARAMETERS :
+ *   @my_obj: camera object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_query_capability(mm_camera_obj_t *my_obj)
+{
+    int32_t rc = 0;
+    struct v4l2_capability cap;
+
+    /* get camera capabilities */
+    memset(&cap, 0, sizeof(cap));
+    rc = ioctl(my_obj->ctrl_fd, VIDIOC_QUERYCAP, &cap);
+    if (rc != 0) {
+        CDBG_ERROR("%s: cannot get camera capabilities, rc = %d\n", __func__, rc);
+    }
+
+    pthread_mutex_unlock(&my_obj->cam_lock);
+    return rc;
+
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_set_parms
+ *
+ * DESCRIPTION: set parameters per camera
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @parms        : ptr to a param struct to be set to server
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : Assume the parms struct buf is already mapped to server via
+ *              domain socket. Corresponding fields of parameters to be set
+ *              are already filled in by upper layer caller.
+ *==========================================================================*/
+int32_t mm_camera_set_parms(mm_camera_obj_t *my_obj,
+                            parm_buffer_t *parms)
+{
+    int32_t rc = -1;
+    int32_t value = 0;
+    if (parms !=  NULL) {
+        rc = mm_camera_util_s_ctrl(my_obj->ctrl_fd, CAM_PRIV_PARM, &value);
+    }
+    pthread_mutex_unlock(&my_obj->cam_lock);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_get_parms
+ *
+ * DESCRIPTION: get parameters per camera
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @parms        : ptr to a param struct to be get from server
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : Assume the parms struct buf is already mapped to the server via
+ *              the domain socket. The parameters to be queried from the server
+ *              are already filled in by the upper layer caller. After this
+ *              call, the corresponding fields of the requested parameters will
+ *              be filled in by the server with detailed information.
+ *==========================================================================*/
+int32_t mm_camera_get_parms(mm_camera_obj_t *my_obj,
+                            parm_buffer_t *parms)
+{
+    int32_t rc = -1;
+    int32_t value = 0;
+    if (parms != NULL) {
+        rc = mm_camera_util_g_ctrl(my_obj->ctrl_fd, CAM_PRIV_PARM, &value);
+    }
+    pthread_mutex_unlock(&my_obj->cam_lock);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_do_auto_focus
+ *
+ * DESCRIPTION: performing auto focus
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : if this call succeeds, an auto_focus event is always assumed
+ *              to follow.
+ *==========================================================================*/
+int32_t mm_camera_do_auto_focus(mm_camera_obj_t *my_obj)
+{
+    int32_t rc = -1;
+    int32_t value = 0;
+    rc = mm_camera_util_s_ctrl(my_obj->ctrl_fd, CAM_PRIV_DO_AUTO_FOCUS, &value);
+    pthread_mutex_unlock(&my_obj->cam_lock);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_cancel_auto_focus
+ *
+ * DESCRIPTION: cancel auto focus
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_cancel_auto_focus(mm_camera_obj_t *my_obj)
+{
+    int32_t rc = -1;
+    int32_t value = 0;
+    rc = mm_camera_util_s_ctrl(my_obj->ctrl_fd, CAM_PRIV_CANCEL_AUTO_FOCUS, &value);
+    pthread_mutex_unlock(&my_obj->cam_lock);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_prepare_snapshot
+ *
+ * DESCRIPTION: prepare hardware for snapshot
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @do_af_flag   : flag indicating if AF is needed
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_prepare_snapshot(mm_camera_obj_t *my_obj,
+                                   int32_t do_af_flag)
+{
+    int32_t rc = -1;
+    int32_t value = do_af_flag;
+    rc = mm_camera_util_s_ctrl(my_obj->ctrl_fd, CAM_PRIV_PREPARE_SNAPSHOT, &value);
+    pthread_mutex_unlock(&my_obj->cam_lock);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_start_zsl_snapshot
+ *
+ * DESCRIPTION: start zsl snapshot
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_start_zsl_snapshot(mm_camera_obj_t *my_obj)
+{
+    int32_t rc = -1;
+    int32_t value = 0;
+
+    rc = mm_camera_util_s_ctrl(my_obj->ctrl_fd,
+             CAM_PRIV_START_ZSL_SNAPSHOT, &value);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_stop_zsl_snapshot
+ *
+ * DESCRIPTION: stop zsl capture
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_stop_zsl_snapshot(mm_camera_obj_t *my_obj)
+{
+    int32_t rc = -1;
+    int32_t value = 0;
+    rc = mm_camera_util_s_ctrl(my_obj->ctrl_fd,
+             CAM_PRIV_STOP_ZSL_SNAPSHOT, &value);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_add_channel
+ *
+ * DESCRIPTION: add a channel
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @attr         : bundle attribute of the channel if needed
+ *   @channel_cb   : callback function for bundle data notify
+ *   @userdata     : user data ptr
+ *
+ * RETURN     : uint32_t type of channel handle
+ *              0  -- invalid channel handle, meaning the op failed
+ *              >0 -- successfully added a channel with a valid handle
+ * NOTE       : if no bundle data notify is needed, meaning each stream in the
+ *              channel will have its own stream data notify callback, then
+ *              attr, channel_cb, and userdata can be NULL. In this case,
+ *              no matching logic will be performed in channel for the bundling.
+ *==========================================================================*/
+uint32_t mm_camera_add_channel(mm_camera_obj_t *my_obj,
+                               mm_camera_channel_attr_t *attr,
+                               mm_camera_buf_notify_t channel_cb,
+                               void *userdata)
+{
+    mm_channel_t *ch_obj = NULL;
+    uint8_t ch_idx = 0;
+    uint32_t ch_hdl = 0;
+
+    for(ch_idx = 0; ch_idx < MM_CAMERA_CHANNEL_MAX; ch_idx++) {
+        if (MM_CHANNEL_STATE_NOTUSED == my_obj->ch[ch_idx].state) {
+            ch_obj = &my_obj->ch[ch_idx];
+            break;
+        }
+    }
+
+    if (NULL != ch_obj) {
+        /* initialize channel obj */
+        memset(ch_obj, 0, sizeof(mm_channel_t));
+        ch_hdl = mm_camera_util_generate_handler(ch_idx);
+        ch_obj->my_hdl = ch_hdl;
+        ch_obj->state = MM_CHANNEL_STATE_STOPPED;
+        ch_obj->cam_obj = my_obj;
+        pthread_mutex_init(&ch_obj->ch_lock, NULL);
+        mm_channel_init(ch_obj, attr, channel_cb, userdata);
+    }
+
+    pthread_mutex_unlock(&my_obj->cam_lock);
+
+    return ch_hdl;
+}
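+
+/*
+ * Illustrative call sequence (added for clarity; a rough sketch only, not part
+ * of the original change): a channel is added first and streams are then
+ * attached to and removed from it by handle:
+ *
+ *     uint32_t ch_hdl = mm_camera_add_channel(my_obj, &attr, channel_cb, userdata);
+ *     if (ch_hdl > 0) {
+ *         uint32_t stream_hdl = mm_camera_add_stream(my_obj, ch_hdl);
+ *         ...
+ *         mm_camera_del_stream(my_obj, ch_hdl, stream_hdl);
+ *         mm_camera_del_channel(my_obj, ch_hdl);
+ *     }
+ *
+ * my_obj->cam_lock must be held on entry to each of these calls, as each call
+ * releases it before returning.
+ */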
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_del_channel
+ *
+ * DESCRIPTION: delete a channel by its handle
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : all streams in the channel should be stopped already before
+ *              this channel can be deleted.
+ *==========================================================================*/
+int32_t mm_camera_del_channel(mm_camera_obj_t *my_obj,
+                              uint32_t ch_id)
+{
+    int32_t rc = -1;
+    mm_channel_t * ch_obj =
+        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+
+    if (NULL != ch_obj) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+
+        rc = mm_channel_fsm_fn(ch_obj,
+                               MM_CHANNEL_EVT_DELETE,
+                               NULL,
+                               NULL);
+
+        pthread_mutex_destroy(&ch_obj->ch_lock);
+        memset(ch_obj, 0, sizeof(mm_channel_t));
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_get_bundle_info
+ *
+ * DESCRIPTION: query bundle info of the channel
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *   @bundle_info  : bundle info to be filled in
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_get_bundle_info(mm_camera_obj_t *my_obj,
+                                  uint32_t ch_id,
+                                  cam_bundle_config_t *bundle_info)
+{
+    int32_t rc = -1;
+    mm_channel_t * ch_obj =
+        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+
+    if (NULL != ch_obj) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+
+        rc = mm_channel_fsm_fn(ch_obj,
+                               MM_CHANNEL_EVT_GET_BUNDLE_INFO,
+                               (void *)bundle_info,
+                               NULL);
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_link_stream
+ *
+ * DESCRIPTION: link a stream into a channel
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *   @stream_id    : stream that will be linked
+ *   @linked_ch_id : channel in which the stream will be linked
+ *
+ * RETURN     : uint32_t type of stream handle
+ *              0  -- invalid stream handle, meaning the op failed
+ *              >0 -- successfully linked a stream with a valid handle
+ *==========================================================================*/
+uint32_t mm_camera_link_stream(mm_camera_obj_t *my_obj,
+        uint32_t ch_id,
+        uint32_t stream_id,
+        uint32_t linked_ch_id)
+{
+    uint32_t s_hdl = 0;
+    mm_channel_t * ch_obj =
+            mm_camera_util_get_channel_by_handler(my_obj, linked_ch_id);
+    mm_channel_t * owner_obj =
+            mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+
+    if ((NULL != ch_obj) && (NULL != owner_obj)) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+
+        mm_camera_stream_link_t stream_link;
+        memset(&stream_link, 0, sizeof(mm_camera_stream_link_t));
+        stream_link.ch = owner_obj;
+        stream_link.stream_id = stream_id;
+        mm_channel_fsm_fn(ch_obj,
+                          MM_CHANNEL_EVT_LINK_STREAM,
+                          (void*)&stream_link,
+                          (void*)&s_hdl);
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+
+    return s_hdl;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_add_stream
+ *
+ * DESCRIPTION: add a stream into a channel
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *
+ * RETURN     : uint32_t type of stream handle
+ *              0  -- invalid stream handle, meaning the op failed
+ *              >0 -- successfully added a stream with a valid handle
+ *==========================================================================*/
+uint32_t mm_camera_add_stream(mm_camera_obj_t *my_obj,
+                              uint32_t ch_id)
+{
+    uint32_t s_hdl = 0;
+    mm_channel_t * ch_obj =
+        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+
+    if (NULL != ch_obj) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+
+        mm_channel_fsm_fn(ch_obj,
+                          MM_CHANNEL_EVT_ADD_STREAM,
+                          NULL,
+                          (void *)&s_hdl);
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+
+    return s_hdl;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_del_stream
+ *
+ * DESCRIPTION: delete a stream by its handle
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *   @stream_id    : stream handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : stream should be stopped already before it can be deleted.
+ *==========================================================================*/
+int32_t mm_camera_del_stream(mm_camera_obj_t *my_obj,
+                             uint32_t ch_id,
+                             uint32_t stream_id)
+{
+    int32_t rc = -1;
+    mm_channel_t * ch_obj =
+        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+
+    if (NULL != ch_obj) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+
+        rc = mm_channel_fsm_fn(ch_obj,
+                               MM_CHANNEL_EVT_DEL_STREAM,
+                               (void *)&stream_id,
+                               NULL);
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_start_zsl_snapshot_ch
+ *
+ * DESCRIPTION: starts ZSL snapshot for a specific channel
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_start_zsl_snapshot_ch(mm_camera_obj_t *my_obj,
+        uint32_t ch_id)
+{
+    int32_t rc = -1;
+    mm_channel_t * ch_obj =
+        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+
+    if (NULL != ch_obj) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+
+        rc = mm_channel_fsm_fn(ch_obj,
+                               MM_CHANNEL_EVT_START_ZSL_SNAPSHOT,
+                               NULL,
+                               NULL);
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_stop_zsl_snapshot_ch
+ *
+ * DESCRIPTION: stops ZSL snapshot for a specific channel
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_stop_zsl_snapshot_ch(mm_camera_obj_t *my_obj,
+        uint32_t ch_id)
+{
+    int32_t rc = -1;
+    mm_channel_t * ch_obj =
+        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+
+    if (NULL != ch_obj) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+
+        rc = mm_channel_fsm_fn(ch_obj,
+                               MM_CHANNEL_EVT_STOP_ZSL_SNAPSHOT,
+                               NULL,
+                               NULL);
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_config_stream
+ *
+ * DESCRIPTION: configure a stream
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *   @stream_id    : stream handle
+ *   @config       : stream configuration
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_config_stream(mm_camera_obj_t *my_obj,
+                                uint32_t ch_id,
+                                uint32_t stream_id,
+                                mm_camera_stream_config_t *config)
+{
+    int32_t rc = -1;
+    mm_channel_t * ch_obj =
+        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+    mm_evt_paylod_config_stream_t payload;
+
+    if (NULL != ch_obj) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+
+        memset(&payload, 0, sizeof(mm_evt_paylod_config_stream_t));
+        payload.stream_id = stream_id;
+        payload.config = config;
+        rc = mm_channel_fsm_fn(ch_obj,
+                               MM_CHANNEL_EVT_CONFIG_STREAM,
+                               (void *)&payload,
+                               NULL);
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_start_channel
+ *
+ * DESCRIPTION: start a channel, which will start all streams in the channel
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_start_channel(mm_camera_obj_t *my_obj,
+                                uint32_t ch_id)
+{
+    int32_t rc = -1;
+    mm_channel_t * ch_obj =
+        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+
+    if (NULL != ch_obj) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+
+        rc = mm_channel_fsm_fn(ch_obj,
+                               MM_CHANNEL_EVT_START,
+                               NULL,
+                               NULL);
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+
+    return rc;
+}
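+
+/* Editor's note: an illustrative (hypothetical) bring-up sequence using the
+ * calls above. Each call expects cam_lock to be held on entry and drops it,
+ * so the lock is re-taken between calls; cam_obj, ch and cfg are placeholder
+ * names, not part of this change:
+ *
+ *     pthread_mutex_lock(&cam_obj->cam_lock);
+ *     uint32_t s_id = mm_camera_add_stream(cam_obj, ch);
+ *     pthread_mutex_lock(&cam_obj->cam_lock);
+ *     mm_camera_config_stream(cam_obj, ch, s_id, &cfg);
+ *     pthread_mutex_lock(&cam_obj->cam_lock);
+ *     mm_camera_start_channel(cam_obj, ch);
+ */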
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_stop_channel
+ *
+ * DESCRIPTION: stop a channel, which will stop all streams in the channel
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_stop_channel(mm_camera_obj_t *my_obj,
+                               uint32_t ch_id)
+{
+    int32_t rc = 0;
+    mm_channel_t * ch_obj =
+        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+
+    if (NULL != ch_obj) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+
+        rc = mm_channel_fsm_fn(ch_obj,
+                               MM_CHANNEL_EVT_STOP,
+                               NULL,
+                               NULL);
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_request_super_buf
+ *
+ * DESCRIPTION: for burst mode in bundle, request a certain number of matched
+ *              frames from the superbuf queue
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *   @num_buf_requested : number of matched frames needed
+ *   @num_retro_buf_requested : number of retro (retroactive ZSL) frames needed
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_request_super_buf(mm_camera_obj_t *my_obj,
+                                    uint32_t ch_id,
+                                    uint32_t num_buf_requested,
+                                    uint32_t num_retro_buf_requested)
+{
+    int32_t rc = -1;
+    mm_channel_t * ch_obj =
+        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+
+    if (NULL != ch_obj) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+
+        rc = mm_channel_fsm_fn(ch_obj,
+                               MM_CHANNEL_EVT_REQUEST_SUPER_BUF,
+                               (void *)&num_buf_requested,
+                               (void *)&num_retro_buf_requested);
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+
+    return rc;
+}
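+
+/* Editor's note: illustrative only. A burst request for three matched super
+ * buffers and no retro frames, with cam_lock held by the caller as elsewhere
+ * in this file:
+ *
+ *     mm_camera_request_super_buf(cam_obj, ch, 3, 0);
+ *
+ * Matched frames are then delivered through the channel's registered
+ * super-buffer callback until the pending count drains.
+ */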
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_cancel_super_buf_request
+ *
+ * DESCRIPTION: for burst mode in bundle, cancel the request for a certain
+ *              number of matched frames from the superbuf queue
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_cancel_super_buf_request(mm_camera_obj_t *my_obj, uint32_t ch_id)
+{
+    int32_t rc = -1;
+    mm_channel_t * ch_obj =
+        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+
+    if (NULL != ch_obj) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+
+        rc = mm_channel_fsm_fn(ch_obj,
+                               MM_CHANNEL_EVT_CANCEL_REQUEST_SUPER_BUF,
+                               NULL,
+                               NULL);
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_flush_super_buf_queue
+ *
+ * DESCRIPTION: flush out all frames in the superbuf queue
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_flush_super_buf_queue(mm_camera_obj_t *my_obj, uint32_t ch_id,
+                                                             uint32_t frame_idx)
+{
+    int32_t rc = -1;
+    mm_channel_t * ch_obj =
+        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+
+    if (NULL != ch_obj) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+
+        rc = mm_channel_fsm_fn(ch_obj,
+                               MM_CHANNEL_EVT_FLUSH_SUPER_BUF_QUEUE,
+                               (void *)&frame_idx,
+                               NULL);
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_config_channel_notify
+ *
+ * DESCRIPTION: configures the channel notification mode
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *   @notify_mode  : notification mode
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_config_channel_notify(mm_camera_obj_t *my_obj,
+                                        uint32_t ch_id,
+                                        mm_camera_super_buf_notify_mode_t notify_mode)
+{
+    int32_t rc = -1;
+    mm_channel_t * ch_obj =
+        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+
+    if (NULL != ch_obj) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+
+        rc = mm_channel_fsm_fn(ch_obj,
+                               MM_CHANNEL_EVT_CONFIG_NOTIFY_MODE,
+                               (void *)&notify_mode,
+                               NULL);
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_set_stream_parms
+ *
+ * DESCRIPTION: set parameters per stream
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *   @s_id         : stream handle
+ *   @parms        : ptr to a param struct to be set on the server
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : Assume the parms struct buf is already mapped to server via
+ *              domain socket. Corresponding fields of parameters to be set
+ *              are already filled in by upper layer caller.
+ *==========================================================================*/
+int32_t mm_camera_set_stream_parms(mm_camera_obj_t *my_obj,
+                                   uint32_t ch_id,
+                                   uint32_t s_id,
+                                   cam_stream_parm_buffer_t *parms)
+{
+    int32_t rc = -1;
+    mm_evt_paylod_set_get_stream_parms_t payload;
+    mm_channel_t * ch_obj =
+        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+
+    if (NULL != ch_obj) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+
+        memset(&payload, 0, sizeof(payload));
+        payload.stream_id = s_id;
+        payload.parms = parms;
+
+        rc = mm_channel_fsm_fn(ch_obj,
+                               MM_CHANNEL_EVT_SET_STREAM_PARM,
+                               (void *)&payload,
+                               NULL);
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_get_stream_parms
+ *
+ * DESCRIPTION: get parameters per stream
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *   @s_id         : stream handle
+ *   @parms        : ptr to a param struct to be filled in by the server
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : Assume the parms struct buf is already mapped to server via
+ *              domain socket. The parameters to be queried are already
+ *              filled in by upper layer caller. After this call, corresponding
+ *              fields of requested parameters will be filled in by server with
+ *              detailed information.
+ *==========================================================================*/
+int32_t mm_camera_get_stream_parms(mm_camera_obj_t *my_obj,
+                                   uint32_t ch_id,
+                                   uint32_t s_id,
+                                   cam_stream_parm_buffer_t *parms)
+{
+    int32_t rc = -1;
+    mm_evt_paylod_set_get_stream_parms_t payload;
+    mm_channel_t * ch_obj =
+        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+
+    if (NULL != ch_obj) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+
+        memset(&payload, 0, sizeof(payload));
+        payload.stream_id = s_id;
+        payload.parms = parms;
+
+        rc = mm_channel_fsm_fn(ch_obj,
+                               MM_CHANNEL_EVT_GET_STREAM_PARM,
+                               (void *)&payload,
+                               NULL);
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_do_stream_action
+ *
+ * DESCRIPTION: request server to perform a stream-based action. May be removed
+ *              later if the functionality is included in mm_camera_set_parms
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *   @s_id         : stream handle
+ *   @actions      : ptr to an action struct buf to be performed by server
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : Assume the action struct buf is already mapped to server via
+ *              domain socket. Actions to be performed by server are already
+ *              filled in by upper layer caller.
+ *==========================================================================*/
+int32_t mm_camera_do_stream_action(mm_camera_obj_t *my_obj,
+                                   uint32_t ch_id,
+                                   uint32_t stream_id,
+                                   void *actions)
+{
+    int32_t rc = -1;
+    mm_evt_paylod_do_stream_action_t payload;
+    mm_channel_t * ch_obj =
+        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+
+    if (NULL != ch_obj) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+
+        memset(&payload, 0, sizeof(payload));
+        payload.stream_id = stream_id;
+        payload.actions = actions;
+
+        rc = mm_channel_fsm_fn(ch_obj,
+                               MM_CHANNEL_EVT_DO_STREAM_ACTION,
+                               (void*)&payload,
+                               NULL);
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_map_stream_buf
+ *
+ * DESCRIPTION: mapping stream buffer via domain socket to server
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *   @s_id         : stream handle
+ *   @buf_type     : type of buffer to be mapped. Could be one of the following:
+ *                   CAM_MAPPING_BUF_TYPE_STREAM_BUF
+ *                   CAM_MAPPING_BUF_TYPE_STREAM_INFO
+ *                   CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF
+ *   @buf_idx      : index of buffer within the stream buffers, only valid if
+ *                   buf_type is CAM_MAPPING_BUF_TYPE_STREAM_BUF or
+ *                   CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF
+ *   @plane_idx    : plane index. If all planes share the same fd,
+ *                   plane_idx = -1; otherwise, plane_idx is the
+ *                   index to plane (0..num_of_planes)
+ *   @fd           : file descriptor of the buffer
+ *   @size         : size of the buffer
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_map_stream_buf(mm_camera_obj_t *my_obj,
+                                 uint32_t ch_id,
+                                 uint32_t stream_id,
+                                 uint8_t buf_type,
+                                 uint32_t buf_idx,
+                                 int32_t plane_idx,
+                                 int fd,
+                                 size_t size)
+{
+    int32_t rc = -1;
+    mm_evt_paylod_map_stream_buf_t payload;
+    mm_channel_t * ch_obj =
+        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+
+    if (NULL != ch_obj) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+
+        memset(&payload, 0, sizeof(payload));
+        payload.stream_id = stream_id;
+        payload.buf_type = buf_type;
+        payload.buf_idx = buf_idx;
+        payload.plane_idx = plane_idx;
+        payload.fd = fd;
+        payload.size = size;
+        rc = mm_channel_fsm_fn(ch_obj,
+                               MM_CHANNEL_EVT_MAP_STREAM_BUF,
+                               (void*)&payload,
+                               NULL);
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+
+    return rc;
+}
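+
+/* Editor's note: illustrative sketch only. Mapping buffer 0 of a stream whose
+ * planes all share a single fd passes plane_idx = -1 (fd and buf_size are
+ * placeholders):
+ *
+ *     mm_camera_map_stream_buf(cam_obj, ch, s_id,
+ *                              CAM_MAPPING_BUF_TYPE_STREAM_BUF,
+ *                              0, -1, fd, buf_size);
+ */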
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_unmap_stream_buf
+ *
+ * DESCRIPTION: unmapping stream buffer via domain socket to server
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *   @s_id         : stream handle
+ *   @buf_type     : type of buffer to be unmapped. Could be one of the following:
+ *                   CAM_MAPPING_BUF_TYPE_STREAM_BUF
+ *                   CAM_MAPPING_BUF_TYPE_STREAM_INFO
+ *                   CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF
+ *   @buf_idx      : index of buffer within the stream buffers, only valid if
+ *                   buf_type is CAM_MAPPING_BUF_TYPE_STREAM_BUF or
+ *                   CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF
+ *   @plane_idx    : plane index. If all planes share the same fd,
+ *                   plane_idx = -1; otherwise, plane_idx is the
+ *                   index to plane (0..num_of_planes)
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_unmap_stream_buf(mm_camera_obj_t *my_obj,
+                                   uint32_t ch_id,
+                                   uint32_t stream_id,
+                                   uint8_t buf_type,
+                                   uint32_t buf_idx,
+                                   int32_t plane_idx)
+{
+    int32_t rc = -1;
+    mm_evt_paylod_unmap_stream_buf_t payload;
+    mm_channel_t * ch_obj =
+        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+
+    if (NULL != ch_obj) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+
+        memset(&payload, 0, sizeof(payload));
+        payload.stream_id = stream_id;
+        payload.buf_type = buf_type;
+        payload.buf_idx = buf_idx;
+        payload.plane_idx = plane_idx;
+        rc = mm_channel_fsm_fn(ch_obj,
+                               MM_CHANNEL_EVT_UNMAP_STREAM_BUF,
+                               (void*)&payload,
+                               NULL);
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_evt_sub
+ *
+ * DESCRIPTION: subscribe/unsubscribe to event notifications from the kernel
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @reg_flag     : 1 -- subscribe ; 0 -- unsubscribe
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_evt_sub(mm_camera_obj_t * my_obj,
+                          uint8_t reg_flag)
+{
+    int32_t rc = 0;
+    struct v4l2_event_subscription sub;
+
+    memset(&sub, 0, sizeof(sub));
+    sub.type = MSM_CAMERA_V4L2_EVENT_TYPE;
+    sub.id = MSM_CAMERA_MSM_NOTIFY;
+    if(FALSE == reg_flag) {
+        /* unsubscribe */
+        rc = ioctl(my_obj->ctrl_fd, VIDIOC_UNSUBSCRIBE_EVENT, &sub);
+        if (rc < 0) {
+            CDBG_ERROR("%s: unsubscribe event rc = %d", __func__, rc);
+            return rc;
+        }
+        /* remove evt fd from the polling thread when unregistering the last event */
+        rc = mm_camera_poll_thread_del_poll_fd(&my_obj->evt_poll_thread,
+                                               my_obj->my_hdl,
+                                               mm_camera_sync_call);
+    } else {
+        rc = ioctl(my_obj->ctrl_fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
+        if (rc < 0) {
+            CDBG_ERROR("%s: subscribe event rc = %d", __func__, rc);
+            return rc;
+        }
+        /* add evt fd to the polling thread when subscribing the first event */
+        rc = mm_camera_poll_thread_add_poll_fd(&my_obj->evt_poll_thread,
+                                               my_obj->my_hdl,
+                                               my_obj->ctrl_fd,
+                                               mm_camera_event_notify,
+                                               (void*)my_obj,
+                                               mm_camera_sync_call);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_util_wait_for_event
+ *
+ * DESCRIPTION: utility function to wait for certain events
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @evt_mask     : mask of events to wait for. Any event in the mask will
+ *                   end the wait
+ *   @status       : status of the event
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void mm_camera_util_wait_for_event(mm_camera_obj_t *my_obj,
+                                   uint32_t evt_mask,
+                                   uint32_t *status)
+{
+    int rc = 0;
+    struct timespec ts;
+
+    pthread_mutex_lock(&my_obj->evt_lock);
+    while (!(my_obj->evt_rcvd.server_event_type & evt_mask)) {
+        clock_gettime(CLOCK_REALTIME, &ts);
+        ts.tv_sec += WAIT_TIMEOUT;
+        rc = pthread_cond_timedwait(&my_obj->evt_cond, &my_obj->evt_lock, &ts);
+        if (rc == ETIMEDOUT) {
+            ALOGE("%s pthread_cond_timedwait success\n", __func__);
+            break;
+        }
+    }
+    *status = my_obj->evt_rcvd.status;
+    /* reset local storage for the received event before the next event */
+    memset(&my_obj->evt_rcvd, 0, sizeof(mm_camera_event_t));
+    pthread_mutex_unlock(&my_obj->evt_lock);
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_util_sendmsg
+ *
+ * DESCRIPTION: utility function to send msg via domain socket
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @msg          : message to be sent
+ *   @buf_size     : size of the message to be sent
+ *   @sendfd       : >0 if a file descriptor needs to be passed across processes
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_util_sendmsg(mm_camera_obj_t *my_obj,
+                               void *msg,
+                               size_t buf_size,
+                               int sendfd)
+{
+    int32_t rc = -1;
+    uint32_t status;
+
+    /* need to lock msg_lock, since sendmsg and waiting for the response are treated as one operation */
+    pthread_mutex_lock(&my_obj->msg_lock);
+    if(mm_camera_socket_sendmsg(my_obj->ds_fd, msg, buf_size, sendfd) > 0) {
+        /* wait for event that mapping/unmapping is done */
+        mm_camera_util_wait_for_event(my_obj, CAM_EVENT_TYPE_MAP_UNMAP_DONE, &status);
+        if (MSM_CAMERA_STATUS_SUCCESS == status) {
+            rc = 0;
+        }
+    }
+    pthread_mutex_unlock(&my_obj->msg_lock);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_map_buf
+ *
+ * DESCRIPTION: mapping camera buffer via domain socket to server
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @buf_type     : type of buffer to be mapped. Could be one of the following:
+ *                   CAM_MAPPING_BUF_TYPE_CAPABILITY
+ *                   CAM_MAPPING_BUF_TYPE_SETPARM_BUF
+ *                   CAM_MAPPING_BUF_TYPE_GETPARM_BUF
+ *   @fd           : file descriptor of the buffer
+ *   @size         : size of the buffer
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_map_buf(mm_camera_obj_t *my_obj,
+                          uint8_t buf_type,
+                          int fd,
+                          size_t size)
+{
+    int32_t rc = 0;
+    cam_sock_packet_t packet;
+    memset(&packet, 0, sizeof(cam_sock_packet_t));
+    packet.msg_type = CAM_MAPPING_TYPE_FD_MAPPING;
+    packet.payload.buf_map.type = buf_type;
+    packet.payload.buf_map.fd = fd;
+    packet.payload.buf_map.size = size;
+    rc = mm_camera_util_sendmsg(my_obj,
+                                &packet,
+                                sizeof(cam_sock_packet_t),
+                                fd);
+    pthread_mutex_unlock(&my_obj->cam_lock);
+    return rc;
+}
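+
+/* Editor's note: hypothetical usage sketch. Mapping, for instance, the
+ * capability buffer sends its fd and size over the domain socket and blocks
+ * until the server acks with CAM_EVENT_TYPE_MAP_UNMAP_DONE (see
+ * mm_camera_util_sendmsg above); cap_fd and cap_size are placeholders:
+ *
+ *     pthread_mutex_lock(&cam_obj->cam_lock);
+ *     mm_camera_map_buf(cam_obj, CAM_MAPPING_BUF_TYPE_CAPABILITY,
+ *                       cap_fd, cap_size);
+ */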
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_unmap_buf
+ *
+ * DESCRIPTION: unmapping camera buffer via domain socket to server
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @buf_type     : type of buffer to be unmapped. Could be one of the following:
+ *                   CAM_MAPPING_BUF_TYPE_CAPABILITY
+ *                   CAM_MAPPING_BUF_TYPE_SETPARM_BUF
+ *                   CAM_MAPPING_BUF_TYPE_GETPARM_BUF
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_unmap_buf(mm_camera_obj_t *my_obj,
+                            uint8_t buf_type)
+{
+    int32_t rc = 0;
+    cam_sock_packet_t packet;
+    memset(&packet, 0, sizeof(cam_sock_packet_t));
+    packet.msg_type = CAM_MAPPING_TYPE_FD_UNMAPPING;
+    packet.payload.buf_unmap.type = buf_type;
+    rc = mm_camera_util_sendmsg(my_obj,
+                                &packet,
+                                sizeof(cam_sock_packet_t),
+                                -1);
+    pthread_mutex_unlock(&my_obj->cam_lock);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_util_s_ctrl
+ *
+ * DESCRIPTION: utility function to send v4l2 ioctl for s_ctrl
+ *
+ * PARAMETERS :
+ *   @fd      : file descriptor for sending the ioctl
+ *   @id      : control id
+ *   @value   : value of the ioctl to be sent
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_util_s_ctrl(int32_t fd,  uint32_t id, int32_t *value)
+{
+    int rc = 0;
+    struct v4l2_control control;
+
+    memset(&control, 0, sizeof(control));
+    control.id = id;
+    if (value != NULL) {
+        control.value = *value;
+    }
+    rc = ioctl(fd, VIDIOC_S_CTRL, &control);
+
+    CDBG("%s: fd=%d, S_CTRL, id=0x%x, value = %p, rc = %d\n",
+         __func__, fd, id, value, rc);
+    if (value != NULL) {
+        *value = control.value;
+    }
+    return (rc >= 0)? 0 : -1;
+}
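+
+/* Editor's note: a minimal sketch, assuming fd is the already-open control
+ * node (my_obj->ctrl_fd elsewhere in this file) and id is a valid private
+ * control id defined outside this file:
+ *
+ *     int32_t value = 0;
+ *     if (0 == mm_camera_util_s_ctrl(fd, id, &value)) {
+ *         CDBG("%s: control 0x%x applied, value=%d", __func__, id, value);
+ *     }
+ */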
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_util_g_ctrl
+ *
+ * DESCRIPTION: utility function to send v4l2 ioctl for g_ctrl
+ *
+ * PARAMETERS :
+ *   @fd      : file descriptor for sending the ioctl
+ *   @id      : control id
+ *   @value   : value of the ioctl to be sent
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_util_g_ctrl( int32_t fd, uint32_t id, int32_t *value)
+{
+    int rc = 0;
+    struct v4l2_control control;
+
+    memset(&control, 0, sizeof(control));
+    control.id = id;
+    if (value != NULL) {
+        control.value = *value;
+    }
+    rc = ioctl(fd, VIDIOC_G_CTRL, &control);
+    CDBG("%s: fd=%d, G_CTRL, id=0x%x, rc = %d\n", __func__, fd, id, rc);
+    if (value != NULL) {
+        *value = control.value;
+    }
+    return (rc >= 0)? 0 : -1;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_channel_advanced_capture
+ *
+ * DESCRIPTION: configures advanced capture for the channel
+ *
+ * PARAMETERS :
+ *   @my_obj       : camera object
+ *   @ch_id        : channel handle
+ *   @type         : advanced capture type
+ *   @trigger      : flag to indicate start/stop
+ *   @in_value     : input configuration
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_channel_advanced_capture(mm_camera_obj_t *my_obj,
+            uint32_t ch_id, mm_camera_advanced_capture_t type,
+            uint32_t trigger, void *in_value)
+{
+    CDBG("%s: E type = %d",__func__, type);
+    int32_t rc = -1;
+    mm_channel_t * ch_obj =
+        mm_camera_util_get_channel_by_handler(my_obj, ch_id);
+
+    if (NULL != ch_obj) {
+        pthread_mutex_lock(&ch_obj->ch_lock);
+        pthread_mutex_unlock(&my_obj->cam_lock);
+        switch (type) {
+            case MM_CAMERA_AF_BRACKETING:
+                rc = mm_channel_fsm_fn(ch_obj,
+                                       MM_CHANNEL_EVT_AF_BRACKETING,
+                                       (void *)&trigger,
+                                       NULL);
+                break;
+            case MM_CAMERA_AE_BRACKETING:
+                rc = mm_channel_fsm_fn(ch_obj,
+                                       MM_CHANNEL_EVT_AE_BRACKETING,
+                                       (void *)&trigger,
+                                       NULL);
+                break;
+            case MM_CAMERA_FLASH_BRACKETING:
+                rc = mm_channel_fsm_fn(ch_obj,
+                                       MM_CHANNEL_EVT_FLASH_BRACKETING,
+                                       (void *)&trigger,
+                                       NULL);
+                break;
+            case MM_CAMERA_ZOOM_1X:
+                rc = mm_channel_fsm_fn(ch_obj,
+                                       MM_CHANNEL_EVT_ZOOM_1X,
+                                       (void *)&trigger,
+                                       NULL);
+                break;
+            case MM_CAMERA_FRAME_CAPTURE:
+                rc = mm_channel_fsm_fn(ch_obj,
+                                       MM_CAMERA_EVT_CAPTURE_SETTING,
+                                       (void *)in_value,
+                                       NULL);
+                break;
+            default:
+                break;
+        }
+
+    } else {
+        pthread_mutex_unlock(&my_obj->cam_lock);
+    }
+
+    CDBG("%s: X",__func__);
+    return rc;
+}
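+
+/* Editor's note: illustrative only. Enabling AF bracketing on a channel would
+ * pass a non-zero trigger and no extra input:
+ *
+ *     mm_camera_channel_advanced_capture(cam_obj, ch,
+ *                                        MM_CAMERA_AF_BRACKETING, 1, NULL);
+ *
+ * For MM_CAMERA_FRAME_CAPTURE, in_value carries the capture configuration
+ * forwarded to the channel as MM_CAMERA_EVT_CAPTURE_SETTING.
+ */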
diff --git a/camera/QCamera2/stack/mm-camera-interface/src/mm_camera_channel.c b/camera/QCamera2/stack/mm-camera-interface/src/mm_camera_channel.c
new file mode 100644
index 0000000..0e968f3
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-interface/src/mm_camera_channel.c
@@ -0,0 +1,2850 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <pthread.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <cam_semaphore.h>
+
+#include "mm_camera_dbg.h"
+#include "mm_camera_interface.h"
+#include "mm_camera.h"
+
+extern mm_camera_obj_t* mm_camera_util_get_camera_by_handler(uint32_t cam_handler);
+extern mm_channel_t * mm_camera_util_get_channel_by_handler(mm_camera_obj_t * cam_obj,
+                                                            uint32_t handler);
+
+/* internal function declare goes here */
+int32_t mm_channel_qbuf(mm_channel_t *my_obj,
+                        mm_camera_buf_def_t *buf);
+int32_t mm_channel_init(mm_channel_t *my_obj,
+                        mm_camera_channel_attr_t *attr,
+                        mm_camera_buf_notify_t channel_cb,
+                        void *userdata);
+void mm_channel_release(mm_channel_t *my_obj);
+uint32_t mm_channel_add_stream(mm_channel_t *my_obj);
+int32_t mm_channel_del_stream(mm_channel_t *my_obj,
+                                   uint32_t stream_id);
+uint32_t mm_channel_link_stream(mm_channel_t *my_obj,
+        mm_camera_stream_link_t *stream_link);
+int32_t mm_channel_config_stream(mm_channel_t *my_obj,
+                                 uint32_t stream_id,
+                                 mm_camera_stream_config_t *config);
+int32_t mm_channel_get_bundle_info(mm_channel_t *my_obj,
+                                   cam_bundle_config_t *bundle_info);
+int32_t mm_channel_start(mm_channel_t *my_obj);
+int32_t mm_channel_stop(mm_channel_t *my_obj);
+int32_t mm_channel_request_super_buf(mm_channel_t *my_obj,
+                uint32_t num_buf_requested, uint32_t num_retro_buf_requested);
+int32_t mm_channel_cancel_super_buf_request(mm_channel_t *my_obj);
+int32_t mm_channel_flush_super_buf_queue(mm_channel_t *my_obj,
+                                         uint32_t frame_idx);
+int32_t mm_channel_config_notify_mode(mm_channel_t *my_obj,
+                                      mm_camera_super_buf_notify_mode_t notify_mode);
+int32_t mm_channel_start_zsl_snapshot(mm_channel_t *my_obj);
+int32_t mm_channel_stop_zsl_snapshot(mm_channel_t *my_obj);
+int32_t mm_channel_superbuf_flush(mm_channel_t* my_obj,
+        mm_channel_queue_t * queue, cam_stream_type_t cam_type);
+int32_t mm_channel_set_stream_parm(mm_channel_t *my_obj,
+                                   mm_evt_paylod_set_get_stream_parms_t *payload);
+int32_t mm_channel_get_queued_buf_count(mm_channel_t *my_obj,
+        uint32_t stream_id);
+
+int32_t mm_channel_get_stream_parm(mm_channel_t *my_obj,
+                                   mm_evt_paylod_set_get_stream_parms_t *payload);
+int32_t mm_channel_do_stream_action(mm_channel_t *my_obj,
+                                    mm_evt_paylod_do_stream_action_t *payload);
+int32_t mm_channel_map_stream_buf(mm_channel_t *my_obj,
+                                  mm_evt_paylod_map_stream_buf_t *payload);
+int32_t mm_channel_unmap_stream_buf(mm_channel_t *my_obj,
+                                    mm_evt_paylod_unmap_stream_buf_t *payload);
+
+/* state machine function declare */
+int32_t mm_channel_fsm_fn_notused(mm_channel_t *my_obj,
+                          mm_channel_evt_type_t evt,
+                          void * in_val,
+                          void * out_val);
+int32_t mm_channel_fsm_fn_stopped(mm_channel_t *my_obj,
+                          mm_channel_evt_type_t evt,
+                          void * in_val,
+                          void * out_val);
+int32_t mm_channel_fsm_fn_active(mm_channel_t *my_obj,
+                          mm_channel_evt_type_t evt,
+                          void * in_val,
+                          void * out_val);
+int32_t mm_channel_fsm_fn_paused(mm_channel_t *my_obj,
+                          mm_channel_evt_type_t evt,
+                          void * in_val,
+                          void * out_val);
+
+/* channel super queue functions */
+int32_t mm_channel_superbuf_queue_init(mm_channel_queue_t * queue);
+int32_t mm_channel_superbuf_queue_deinit(mm_channel_queue_t * queue);
+int32_t mm_channel_superbuf_comp_and_enqueue(mm_channel_t *ch_obj,
+                                             mm_channel_queue_t * queue,
+                                             mm_camera_buf_info_t *buf);
+mm_channel_queue_node_t* mm_channel_superbuf_dequeue(mm_channel_queue_t * queue);
+int32_t mm_channel_superbuf_bufdone_overflow(mm_channel_t *my_obj,
+                                             mm_channel_queue_t *queue);
+int32_t mm_channel_superbuf_skip(mm_channel_t *my_obj,
+                                 mm_channel_queue_t *queue);
+
+static int32_t mm_channel_proc_general_cmd(mm_channel_t *my_obj,
+                                           mm_camera_generic_cmd_t *p_gen_cmd);
+int32_t mm_channel_superbuf_flush_matched(mm_channel_t* my_obj,
+                                          mm_channel_queue_t * queue);
+/*===========================================================================
+ * FUNCTION   : mm_channel_util_get_stream_by_handler
+ *
+ * DESCRIPTION: utility function to get a stream object from its handle
+ *
+ * PARAMETERS :
+ *   @ch_obj : ptr to a channel object
+ *   @handler: stream handle
+ *
+ * RETURN     : ptr to a stream object.
+ *              NULL if failed.
+ *==========================================================================*/
+mm_stream_t * mm_channel_util_get_stream_by_handler(
+                                    mm_channel_t * ch_obj,
+                                    uint32_t handler)
+{
+    int i;
+    mm_stream_t *s_obj = NULL;
+    for(i = 0; i < MAX_STREAM_NUM_IN_BUNDLE; i++) {
+        if ((MM_STREAM_STATE_NOTUSED != ch_obj->streams[i].state) &&
+            (handler == ch_obj->streams[i].my_hdl)) {
+            s_obj = &ch_obj->streams[i];
+            break;
+        }
+    }
+    return s_obj;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_dispatch_super_buf
+ *
+ * DESCRIPTION: dispatch super buffer of bundle to registered user
+ *
+ * PARAMETERS :
+ *   @cmd_cb  : ptr storing matched super buf information
+ *   @userdata: user data ptr
+ *
+ * RETURN     : none
+ *==========================================================================*/
+static void mm_channel_dispatch_super_buf(mm_camera_cmdcb_t *cmd_cb,
+                                          void* user_data)
+{
+    mm_camera_cmd_thread_name("mm_cam_cb");
+    mm_channel_t * my_obj = (mm_channel_t *)user_data;
+
+    if (NULL == my_obj) {
+        return;
+    }
+
+    if (MM_CAMERA_CMD_TYPE_SUPER_BUF_DATA_CB != cmd_cb->cmd_type) {
+        CDBG_ERROR("%s: Wrong cmd_type (%d) for super buf dataCB",
+                   __func__, cmd_cb->cmd_type);
+        return;
+    }
+
+    if (my_obj->bundle.super_buf_notify_cb) {
+        my_obj->bundle.super_buf_notify_cb(&cmd_cb->u.superbuf, my_obj->bundle.user_data);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_process_stream_buf
+ *
+ * DESCRIPTION: handle an incoming buffer from a stream in a bundle. Matching
+ *              logic is performed on incoming stream frames. Depending on the
+ *              bundle attribute, matched frames are either stored in the
+ *              superbuf queue or sent to the upper layer through the
+ *              registered callback.
+ *
+ * PARAMETERS :
+ *   @cmd_cb  : ptr storing matched super buf information
+ *   @userdata: user data ptr
+ *
+ * RETURN     : none
+ *==========================================================================*/
+static void mm_channel_process_stream_buf(mm_camera_cmdcb_t * cmd_cb,
+                                          void *user_data)
+{
+    mm_camera_cmd_thread_name("mm_cam_cmd");
+    mm_camera_super_buf_notify_mode_t notify_mode;
+    mm_channel_queue_node_t *node = NULL;
+    mm_channel_t *ch_obj = (mm_channel_t *)user_data;
+    uint32_t i = 0;
+
+    if (NULL == ch_obj) {
+        return;
+    }
+    if (MM_CAMERA_CMD_TYPE_DATA_CB  == cmd_cb->cmd_type) {
+        /* comp_and_enqueue */
+        mm_channel_superbuf_comp_and_enqueue(
+                        ch_obj,
+                        &ch_obj->bundle.superbuf_queue,
+                        &cmd_cb->u.buf);
+    } else if (MM_CAMERA_CMD_TYPE_REQ_DATA_CB  == cmd_cb->cmd_type) {
+        /* skip frames if needed */
+        ch_obj->pending_cnt = cmd_cb->u.req_buf.num_buf_requested;
+        ch_obj->pending_retro_cnt = cmd_cb->u.req_buf.num_retro_buf_requested;
+        ch_obj->bWaitForPrepSnapshotDone = 0;
+
+        ALOGV("%s:[ZSL Retro] pending cnt (%d), retro count (%d)",
+              __func__, ch_obj->pending_cnt, ch_obj->pending_retro_cnt);
+        if (!ch_obj->pending_cnt || (ch_obj->pending_retro_cnt > ch_obj->pending_cnt)) {
+          ch_obj->pending_retro_cnt = ch_obj->pending_cnt;
+        }
+        if (ch_obj->pending_retro_cnt > 0) {
+          ALOGV("%s: [ZSL Retro] Resetting need Led Flash!!!",
+              __func__);
+          ch_obj->needLEDFlash = 0;
+        }
+        ch_obj->stopZslSnapshot = 0;
+        ch_obj->unLockAEC = 0;
+
+        mm_channel_superbuf_skip(ch_obj, &ch_obj->bundle.superbuf_queue);
+
+    } else if (MM_CAMERA_CMD_TYPE_START_ZSL == cmd_cb->cmd_type) {
+            ch_obj->manualZSLSnapshot = TRUE;
+            mm_camera_start_zsl_snapshot(ch_obj->cam_obj);
+    } else if (MM_CAMERA_CMD_TYPE_STOP_ZSL == cmd_cb->cmd_type) {
+            ch_obj->manualZSLSnapshot = FALSE;
+            mm_camera_stop_zsl_snapshot(ch_obj->cam_obj);
+    } else if (MM_CAMERA_CMD_TYPE_CONFIG_NOTIFY == cmd_cb->cmd_type) {
+           ch_obj->bundle.superbuf_queue.attr.notify_mode = cmd_cb->u.notify_mode;
+    } else if (MM_CAMERA_CMD_TYPE_FLUSH_QUEUE  == cmd_cb->cmd_type) {
+        ch_obj->bundle.superbuf_queue.expected_frame_id = cmd_cb->u.frame_idx;
+        mm_channel_superbuf_flush(ch_obj,
+                &ch_obj->bundle.superbuf_queue, CAM_STREAM_TYPE_DEFAULT);
+        return;
+    } else if (MM_CAMERA_CMD_TYPE_GENERAL == cmd_cb->cmd_type) {
+        CDBG_HIGH("%s:%d] MM_CAMERA_CMD_TYPE_GENERAL", __func__, __LINE__);
+        switch (cmd_cb->u.gen_cmd.type) {
+            case MM_CAMERA_GENERIC_CMD_TYPE_AE_BRACKETING:
+            case MM_CAMERA_GENERIC_CMD_TYPE_AF_BRACKETING: {
+                uint32_t start = cmd_cb->u.gen_cmd.payload[0];
+                CDBG_HIGH("%s:%d] MM_CAMERA_GENERIC_CMDTYPE_AF_BRACKETING %u",
+                    __func__, __LINE__, start);
+                mm_channel_superbuf_flush(ch_obj,
+                        &ch_obj->bundle.superbuf_queue, CAM_STREAM_TYPE_DEFAULT);
+
+                if (start) {
+                    CDBG_HIGH("%s:%d] need AE bracketing, start zsl snapshot",
+                        __func__, __LINE__);
+                    ch_obj->bracketingState = MM_CHANNEL_BRACKETING_STATE_WAIT_GOOD_FRAME_IDX;
+                } else {
+                    ch_obj->bracketingState = MM_CHANNEL_BRACKETING_STATE_OFF;
+                }
+            }
+                break;
+            case MM_CAMERA_GENERIC_CMD_TYPE_FLASH_BRACKETING: {
+                uint32_t start = cmd_cb->u.gen_cmd.payload[0];
+                CDBG_HIGH("%s:%d] MM_CAMERA_GENERIC_CMDTYPE_FLASH_BRACKETING %u",
+                    __func__, __LINE__, start);
+                mm_channel_superbuf_flush(ch_obj,
+                        &ch_obj->bundle.superbuf_queue, CAM_STREAM_TYPE_DEFAULT);
+
+                if (start) {
+                    CDBG_HIGH("%s:%d] need flash bracketing",
+                        __func__, __LINE__);
+                    ch_obj->isFlashBracketingEnabled = TRUE;
+                } else {
+                    ch_obj->isFlashBracketingEnabled = FALSE;
+                }
+            }
+                break;
+            case MM_CAMERA_GENERIC_CMD_TYPE_ZOOM_1X: {
+                uint32_t start = cmd_cb->u.gen_cmd.payload[0];
+                CDBG_HIGH("%s:%d] MM_CAMERA_GENERIC_CMD_TYPE_ZOOM_1X %u",
+                    __func__, __LINE__, start);
+                mm_channel_superbuf_flush(ch_obj,
+                        &ch_obj->bundle.superbuf_queue, CAM_STREAM_TYPE_DEFAULT);
+
+                if (start) {
+                    CDBG_HIGH("%s:%d] need zoom 1x frame",
+                        __func__, __LINE__);
+                    ch_obj->isZoom1xFrameRequested = TRUE;
+                } else {
+                    ch_obj->isZoom1xFrameRequested = FALSE;
+                }
+            }
+                break;
+            case MM_CAMERA_GENERIC_CMD_TYPE_CAPTURE_SETTING: {
+                uint32_t start = cmd_cb->u.gen_cmd.payload[0];
+                CDBG_HIGH("%s:%d] MM_CAMERA_GENERIC_CMD_TYPE_CAPTURE_SETTING %u",
+                    __func__, __LINE__, start);
+
+                mm_channel_superbuf_flush(ch_obj,
+                        &ch_obj->bundle.superbuf_queue, CAM_STREAM_TYPE_DEFAULT);
+                if (start) {
+                    ch_obj->frame_config =
+                            (cam_capture_frame_config_t *)&cmd_cb->u.gen_cmd.frame_config;
+                    CDBG_HIGH("%s:%d] Capture setting Batch Count %d",
+                            __func__, __LINE__, ch_obj->frame_config->num_batch);
+                    for (i = 0; i < ch_obj->frame_config->num_batch; i++) {
+                        CDBG("capture setting frame = %d type = %d",
+                                i,ch_obj->frame_config->configs[i].type);
+                    }
+                } else {
+                    ch_obj->frame_config = NULL;
+                }
+                ch_obj->cur_capture_idx = 0;
+                break;
+            }
+            default:
+                CDBG_ERROR("%s:%d] Error: Invalid command", __func__, __LINE__);
+                break;
+        }
+    }
+    notify_mode = ch_obj->bundle.superbuf_queue.attr.notify_mode;
+
+    if ((ch_obj->pending_cnt > 0)
+        && (ch_obj->needLEDFlash == TRUE ||
+                MM_CHANNEL_BRACKETING_STATE_OFF != ch_obj->bracketingState)
+        && (ch_obj->manualZSLSnapshot == FALSE)
+        && ch_obj->startZSlSnapshotCalled == FALSE) {
+
+      CDBG_HIGH("%s: need flash, start zsl snapshot", __func__);
+      mm_camera_start_zsl_snapshot(ch_obj->cam_obj);
+      ch_obj->startZSlSnapshotCalled = TRUE;
+      ch_obj->burstSnapNum = ch_obj->pending_cnt;
+      ch_obj->bWaitForPrepSnapshotDone = 0;
+    } else if (((ch_obj->pending_cnt == 0) || (ch_obj->stopZslSnapshot == 1))
+            && (ch_obj->manualZSLSnapshot == FALSE)
+            && (ch_obj->startZSlSnapshotCalled == TRUE)) {
+      CDBG_HIGH("%s: Got picture cancelled, stop zsl snapshot", __func__);
+      mm_camera_stop_zsl_snapshot(ch_obj->cam_obj);
+      // Unlock AEC
+      ch_obj->startZSlSnapshotCalled = FALSE;
+      ch_obj->needLEDFlash = FALSE;
+      ch_obj->burstSnapNum = 0;
+      ch_obj->stopZslSnapshot = 0;
+      ch_obj->bWaitForPrepSnapshotDone = 0;
+      ch_obj->unLockAEC = 1;
+      ch_obj->bracketingState = MM_CHANNEL_BRACKETING_STATE_OFF;
+    }
+    /* bufdone for overflowed bufs */
+    mm_channel_superbuf_bufdone_overflow(ch_obj, &ch_obj->bundle.superbuf_queue);
+
+    CDBG("%s: Super Buffer received, pending_cnt=%d",
+        __func__, ch_obj->pending_cnt);
+    /* dispatch frame if pending_cnt>0 or is in continuous streaming mode */
+
+    CDBG("%s: [ZSL Retro] Out loop pending cnt (%d), retro count (%d)",
+          __func__, ch_obj->pending_cnt, ch_obj->pending_retro_cnt);
+    while (((ch_obj->pending_cnt > 0) ||
+             (MM_CAMERA_SUPER_BUF_NOTIFY_CONTINUOUS == notify_mode)) &&
+             (!ch_obj->bWaitForPrepSnapshotDone)) {
+
+      CDBG("%s: [ZSL Retro] In loop pending cnt (%d), retro count (%d)",
+            __func__, ch_obj->pending_cnt, ch_obj->pending_retro_cnt);
+        /* dequeue */
+        node = mm_channel_superbuf_dequeue(&ch_obj->bundle.superbuf_queue);
+        if (NULL != node) {
+             uint8_t bReady = 0;
+            /* decrease pending_cnt */
+            if (MM_CAMERA_SUPER_BUF_NOTIFY_BURST == notify_mode) {
+                ch_obj->pending_cnt--;
+                if (ch_obj->pending_retro_cnt > 0) {
+                  if (ch_obj->pending_retro_cnt == 1) {
+                    ch_obj->bWaitForPrepSnapshotDone = 1;
+                    // Retro Snaps are done..
+                    bReady = 1;
+                  }
+                  ch_obj->pending_retro_cnt--;
+                }
+                CDBG("%s: [ZSL Retro] Super Buffer received, Call client callback,"
+                    "pending_cnt=%d", __func__, ch_obj->pending_cnt);
+
+                if (((ch_obj->pending_cnt == 0) ||
+                      (ch_obj->stopZslSnapshot == 1)) &&
+                      (ch_obj->manualZSLSnapshot == FALSE) &&
+                       ch_obj->startZSlSnapshotCalled == TRUE) {
+                    CDBG("%s: [ZSL Retro] Received all frames requested, stop zsl snapshot", __func__);
+                    mm_camera_stop_zsl_snapshot(ch_obj->cam_obj);
+                    ch_obj->startZSlSnapshotCalled = FALSE;
+                    ch_obj->burstSnapNum = 0;
+                    ch_obj->stopZslSnapshot = 0;
+                    ch_obj->unLockAEC = 1;
+                    ch_obj->needLEDFlash = FALSE;
+                    ch_obj->bracketingState = MM_CHANNEL_BRACKETING_STATE_OFF;
+                }
+
+                if (ch_obj->frame_config != NULL) {
+                    if (ch_obj->frame_config->configs[ch_obj->cur_capture_idx].num_frames != 0) {
+                        ch_obj->frame_config->configs[ch_obj->cur_capture_idx].num_frames--;
+                        CDBG("Need %d frames more for batch %d",
+                                ch_obj->frame_config->configs[ch_obj->cur_capture_idx].num_frames,
+                                ch_obj->cur_capture_idx);
+                    }
+                }
+            }
+            /* dispatch superbuf */
+            if (NULL != ch_obj->bundle.super_buf_notify_cb) {
+                uint8_t i;
+                mm_camera_cmdcb_t* cb_node = NULL;
+
+                CDBG("%s: Send superbuf to HAL, pending_cnt=%d",
+                     __func__, ch_obj->pending_cnt);
+
+                /* send cam_sem_post to wake up cb thread to dispatch super buffer */
+                cb_node = (mm_camera_cmdcb_t *)malloc(sizeof(mm_camera_cmdcb_t));
+                if (NULL != cb_node) {
+                    memset(cb_node, 0, sizeof(mm_camera_cmdcb_t));
+                    cb_node->cmd_type = MM_CAMERA_CMD_TYPE_SUPER_BUF_DATA_CB;
+                    cb_node->u.superbuf.num_bufs = node->num_of_bufs;
+                    for (i=0; i<node->num_of_bufs; i++) {
+                        cb_node->u.superbuf.bufs[i] = node->super_buf[i].buf;
+                    }
+                    cb_node->u.superbuf.camera_handle = ch_obj->cam_obj->my_hdl;
+                    cb_node->u.superbuf.ch_id = ch_obj->my_hdl;
+                    cb_node->u.superbuf.bReadyForPrepareSnapshot = bReady;
+                    if (ch_obj->unLockAEC == 1) {
+                      cb_node->u.superbuf.bUnlockAEC = 1;
+                      ALOGE("%s:[ZSL Retro] Unlocking AEC", __func__);
+                      ch_obj->unLockAEC = 0;
+                    }
+
+                    /* enqueue to cb thread */
+                    cam_queue_enq(&(ch_obj->cb_thread.cmd_queue), cb_node);
+                    /* wake up cb thread */
+                    cam_sem_post(&(ch_obj->cb_thread.cmd_sem));
+                } else {
+                    CDBG_ERROR("%s: No memory for mm_camera_node_t", __func__);
+                    /* buf done with the unused super buf */
+                    for (i=0; i<node->num_of_bufs; i++) {
+                        mm_channel_qbuf(ch_obj, node->super_buf[i].buf);
+                    }
+                }
+            } else {
+                /* buf done with the unused super buf */
+                uint8_t i;
+                for (i=0; i<node->num_of_bufs; i++) {
+                    mm_channel_qbuf(ch_obj, node->super_buf[i].buf);
+                }
+            }
+            free(node);
+        } else {
+            /* no superbuf avail, break the loop */
+            break;
+        }
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_fsm_fn
+ *
+ * DESCRIPTION: channel finite state machine entry function. Depending on the
+ *              channel state, the incoming event is handled differently.
+ *
+ * PARAMETERS :
+ *   @my_obj   : ptr to a channel object
+ *   @evt      : channel event to be processed
+ *   @in_val   : input event payload. Can be NULL if not needed.
+ *   @out_val  : output payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_fsm_fn(mm_channel_t *my_obj,
+                          mm_channel_evt_type_t evt,
+                          void * in_val,
+                          void * out_val)
+{
+    int32_t rc = -1;
+
+    CDBG("%s : E state = %d", __func__, my_obj->state);
+    switch (my_obj->state) {
+    case MM_CHANNEL_STATE_NOTUSED:
+        rc = mm_channel_fsm_fn_notused(my_obj, evt, in_val, out_val);
+        break;
+    case MM_CHANNEL_STATE_STOPPED:
+        rc = mm_channel_fsm_fn_stopped(my_obj, evt, in_val, out_val);
+        break;
+    case MM_CHANNEL_STATE_ACTIVE:
+        rc = mm_channel_fsm_fn_active(my_obj, evt, in_val, out_val);
+        break;
+    case MM_CHANNEL_STATE_PAUSED:
+        rc = mm_channel_fsm_fn_paused(my_obj, evt, in_val, out_val);
+        break;
+    default:
+        CDBG("%s: Not a valid state (%d)", __func__, my_obj->state);
+        break;
+    }
+
+    /* unlock ch_lock */
+    pthread_mutex_unlock(&my_obj->ch_lock);
+    CDBG("%s : X rc = %d", __func__, rc);
+    return rc;
+}
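+
+/*
+ * Editorial sketch (illustrative, not part of this change): every channel
+ * operation is routed through this FSM entry. Assuming the caller already
+ * holds ch_lock (the FSM unlocks it on exit), adding a stream to a stopped
+ * channel would look roughly like:
+ *
+ *     uint32_t stream_hdl = 0;
+ *     mm_channel_fsm_fn(my_obj, MM_CHANNEL_EVT_ADD_STREAM, NULL, &stream_hdl);
+ *
+ * where the stopped-state handler below writes the new stream handle into
+ * out_val.
+ */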
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_fsm_fn_notused
+ *
+ * DESCRIPTION: channel finite state machine function to handle event
+ *              in NOT_USED state.
+ *
+ * PARAMETERS :
+ *   @my_obj   : ptr to a channel object
+ *   @evt      : channel event to be processed
+ *   @in_val   : input event payload. Can be NULL if not needed.
+ *   @out_val  : output payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_fsm_fn_notused(mm_channel_t *my_obj,
+                                  mm_channel_evt_type_t evt,
+                                  void * in_val,
+                                  void * out_val)
+{
+    int32_t rc = -1;
+
+    switch (evt) {
+    default:
+        CDBG_ERROR("%s: invalid state (%d) for evt (%d), in(%p), out(%p)",
+                   __func__, my_obj->state, evt, in_val, out_val);
+        break;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_fsm_fn_stopped
+ *
+ * DESCRIPTION: channel finite state machine function to handle event
+ *              in STOPPED state.
+ *
+ * PARAMETERS :
+ *   @my_obj   : ptr to a channel object
+ *   @evt      : channel event to be processed
+ *   @in_val   : input event payload. Can be NULL if not needed.
+ *   @out_val  : output payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_fsm_fn_stopped(mm_channel_t *my_obj,
+                                  mm_channel_evt_type_t evt,
+                                  void * in_val,
+                                  void * out_val)
+{
+    int32_t rc = 0;
+    CDBG("%s : E evt = %d", __func__, evt);
+    switch (evt) {
+    case MM_CHANNEL_EVT_ADD_STREAM:
+        {
+            uint32_t s_hdl = 0;
+            s_hdl = mm_channel_add_stream(my_obj);
+            *((uint32_t*)out_val) = s_hdl;
+            rc = 0;
+        }
+        break;
+    case MM_CHANNEL_EVT_LINK_STREAM:
+        {
+            mm_camera_stream_link_t *stream_link = NULL;
+            uint32_t s_hdl = 0;
+            stream_link = (mm_camera_stream_link_t *) in_val;
+            s_hdl = mm_channel_link_stream(my_obj, stream_link);
+            *((uint32_t*)out_val) = s_hdl;
+            rc = 0;
+        }
+        break;
+    case MM_CHANNEL_EVT_DEL_STREAM:
+        {
+            uint32_t s_id = *((uint32_t *)in_val);
+            rc = mm_channel_del_stream(my_obj, s_id);
+        }
+        break;
+    case MM_CHANNEL_EVT_START:
+        {
+            rc = mm_channel_start(my_obj);
+            /* first stream started in stopped state
+             * move to active state */
+            if (0 == rc) {
+                my_obj->state = MM_CHANNEL_STATE_ACTIVE;
+            }
+        }
+        break;
+    case MM_CHANNEL_EVT_CONFIG_STREAM:
+        {
+            mm_evt_paylod_config_stream_t *payload =
+                (mm_evt_paylod_config_stream_t *)in_val;
+            rc = mm_channel_config_stream(my_obj,
+                                          payload->stream_id,
+                                          payload->config);
+        }
+        break;
+    case MM_CHANNEL_EVT_GET_BUNDLE_INFO:
+        {
+            cam_bundle_config_t *payload =
+                (cam_bundle_config_t *)in_val;
+            rc = mm_channel_get_bundle_info(my_obj, payload);
+        }
+        break;
+    case MM_CHANNEL_EVT_DELETE:
+        {
+            mm_channel_release(my_obj);
+            rc = 0;
+        }
+        break;
+    case MM_CHANNEL_EVT_SET_STREAM_PARM:
+        {
+            mm_evt_paylod_set_get_stream_parms_t *payload =
+                (mm_evt_paylod_set_get_stream_parms_t *)in_val;
+            rc = mm_channel_set_stream_parm(my_obj, payload);
+        }
+        break;
+    case MM_CHANNEL_EVT_GET_STREAM_QUEUED_BUF_COUNT:
+        {
+            uint32_t stream_id = *((uint32_t *)in_val);
+            rc = mm_channel_get_queued_buf_count(my_obj, stream_id);
+        }
+        break;
+    case MM_CHANNEL_EVT_GET_STREAM_PARM:
+        {
+            mm_evt_paylod_set_get_stream_parms_t *payload =
+                (mm_evt_paylod_set_get_stream_parms_t *)in_val;
+            rc = mm_channel_get_stream_parm(my_obj, payload);
+        }
+        break;
+    case MM_CHANNEL_EVT_DO_STREAM_ACTION:
+        {
+            mm_evt_paylod_do_stream_action_t *payload =
+                (mm_evt_paylod_do_stream_action_t *)in_val;
+            rc = mm_channel_do_stream_action(my_obj, payload);
+        }
+        break;
+    case MM_CHANNEL_EVT_MAP_STREAM_BUF:
+        {
+            mm_evt_paylod_map_stream_buf_t *payload =
+                (mm_evt_paylod_map_stream_buf_t *)in_val;
+            rc = mm_channel_map_stream_buf(my_obj, payload);
+        }
+        break;
+    case MM_CHANNEL_EVT_UNMAP_STREAM_BUF:
+        {
+            mm_evt_paylod_unmap_stream_buf_t *payload =
+                (mm_evt_paylod_unmap_stream_buf_t *)in_val;
+            rc = mm_channel_unmap_stream_buf(my_obj, payload);
+        }
+        break;
+    default:
+        CDBG_ERROR("%s: invalid state (%d) for evt (%d)",
+                   __func__, my_obj->state, evt);
+        break;
+    }
+    CDBG("%s : X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_fsm_fn_active
+ *
+ * DESCRIPTION: channel finite state machine function to handle event
+ *              in ACTIVE state.
+ *
+ * PARAMETERS :
+ *   @my_obj   : ptr to a channel object
+ *   @evt      : channel event to be processed
+ *   @in_val   : input event payload. Can be NULL if not needed.
+ *   @out_val  : output payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_fsm_fn_active(mm_channel_t *my_obj,
+                          mm_channel_evt_type_t evt,
+                          void * in_val,
+                          void * out_val)
+{
+    int32_t rc = 0;
+
+    CDBG("%s : E evt = %d", __func__, evt);
+    switch (evt) {
+    case MM_CHANNEL_EVT_STOP:
+        {
+            rc = mm_channel_stop(my_obj);
+            my_obj->state = MM_CHANNEL_STATE_STOPPED;
+        }
+        break;
+    case MM_CHANNEL_EVT_REQUEST_SUPER_BUF:
+        {
+            uint32_t num_buf_requested = *((uint32_t *)in_val);
+            uint32_t num_retro_buf_requested = *((uint32_t *)out_val);
+            rc = mm_channel_request_super_buf(my_obj,
+                num_buf_requested, num_retro_buf_requested);
+        }
+        break;
+    case MM_CHANNEL_EVT_CANCEL_REQUEST_SUPER_BUF:
+        {
+            rc = mm_channel_cancel_super_buf_request(my_obj);
+        }
+        break;
+    case MM_CHANNEL_EVT_FLUSH_SUPER_BUF_QUEUE:
+        {
+            uint32_t frame_idx = *((uint32_t *)in_val);
+            rc = mm_channel_flush_super_buf_queue(my_obj, frame_idx);
+        }
+        break;
+    case MM_CHANNEL_EVT_START_ZSL_SNAPSHOT:
+        {
+            rc = mm_channel_start_zsl_snapshot(my_obj);
+        }
+        break;
+    case MM_CHANNEL_EVT_STOP_ZSL_SNAPSHOT:
+        {
+            rc = mm_channel_stop_zsl_snapshot(my_obj);
+        }
+        break;
+    case MM_CHANNEL_EVT_CONFIG_NOTIFY_MODE:
+        {
+            mm_camera_super_buf_notify_mode_t notify_mode =
+                *((mm_camera_super_buf_notify_mode_t *)in_val);
+            rc = mm_channel_config_notify_mode(my_obj, notify_mode);
+        }
+        break;
+    case MM_CHANNEL_EVT_SET_STREAM_PARM:
+        {
+            mm_evt_paylod_set_get_stream_parms_t *payload =
+                (mm_evt_paylod_set_get_stream_parms_t *)in_val;
+            rc = mm_channel_set_stream_parm(my_obj, payload);
+        }
+        break;
+    case MM_CHANNEL_EVT_GET_STREAM_QUEUED_BUF_COUNT:
+        {
+            uint32_t stream_id = *((uint32_t *)in_val);
+            rc = mm_channel_get_queued_buf_count(my_obj, stream_id);
+        }
+        break;
+    case MM_CHANNEL_EVT_GET_STREAM_PARM:
+        {
+            mm_evt_paylod_set_get_stream_parms_t *payload =
+                (mm_evt_paylod_set_get_stream_parms_t *)in_val;
+            rc = mm_channel_get_stream_parm(my_obj, payload);
+        }
+        break;
+    case MM_CHANNEL_EVT_DO_STREAM_ACTION:
+        {
+            mm_evt_paylod_do_stream_action_t *payload =
+                (mm_evt_paylod_do_stream_action_t *)in_val;
+            rc = mm_channel_do_stream_action(my_obj, payload);
+        }
+        break;
+    case MM_CHANNEL_EVT_MAP_STREAM_BUF:
+        {
+            mm_evt_paylod_map_stream_buf_t *payload =
+                (mm_evt_paylod_map_stream_buf_t *)in_val;
+            if (payload != NULL) {
+                uint8_t type = payload->buf_type;
+                if ((type == CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF) ||
+                        (type == CAM_MAPPING_BUF_TYPE_OFFLINE_META_BUF)) {
+                    rc = mm_channel_map_stream_buf(my_obj, payload);
+                }
+            } else {
+                CDBG_ERROR("%s: cannot map regular stream buf in active state", __func__);
+            }
+        }
+        break;
+    case MM_CHANNEL_EVT_UNMAP_STREAM_BUF:
+        {
+            mm_evt_paylod_unmap_stream_buf_t *payload =
+                (mm_evt_paylod_unmap_stream_buf_t *)in_val;
+            if (payload != NULL) {
+                uint8_t type = payload->buf_type;
+                if ((type == CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF) ||
+                        (type == CAM_MAPPING_BUF_TYPE_OFFLINE_META_BUF)) {
+                    rc = mm_channel_unmap_stream_buf(my_obj, payload);
+                }
+            } else {
+                CDBG_ERROR("%s: cannot unmap regular stream buf in active state", __func__);
+            }
+        }
+        break;
+    case MM_CHANNEL_EVT_AF_BRACKETING:
+        {
+            CDBG_HIGH("MM_CHANNEL_EVT_AF_BRACKETING");
+            uint32_t start_flag = *((uint32_t *)in_val);
+            mm_camera_generic_cmd_t gen_cmd;
+            gen_cmd.type = MM_CAMERA_GENERIC_CMD_TYPE_AF_BRACKETING;
+            gen_cmd.payload[0] = start_flag;
+            rc = mm_channel_proc_general_cmd(my_obj, &gen_cmd);
+        }
+        break;
+    case MM_CHANNEL_EVT_AE_BRACKETING:
+        {
+            CDBG_HIGH("MM_CHANNEL_EVT_AE_BRACKETING");
+            uint32_t start_flag = *((uint32_t *)in_val);
+            mm_camera_generic_cmd_t gen_cmd;
+            gen_cmd.type = MM_CAMERA_GENERIC_CMD_TYPE_AE_BRACKETING;
+            gen_cmd.payload[0] = start_flag;
+            rc = mm_channel_proc_general_cmd(my_obj, &gen_cmd);
+        }
+        break;
+    case MM_CHANNEL_EVT_FLASH_BRACKETING:
+        {
+            CDBG_HIGH("MM_CHANNEL_EVT_FLASH_BRACKETING");
+            uint32_t start_flag = *((uint32_t *)in_val);
+            mm_camera_generic_cmd_t gen_cmd;
+            gen_cmd.type = MM_CAMERA_GENERIC_CMD_TYPE_FLASH_BRACKETING;
+            gen_cmd.payload[0] = start_flag;
+            rc = mm_channel_proc_general_cmd(my_obj, &gen_cmd);
+        }
+        break;
+    case MM_CHANNEL_EVT_ZOOM_1X:
+        {
+            CDBG_HIGH("MM_CHANNEL_EVT_ZOOM_1X");
+            uint32_t start_flag = *((uint32_t *)in_val);
+            mm_camera_generic_cmd_t gen_cmd;
+            gen_cmd.type = MM_CAMERA_GENERIC_CMD_TYPE_ZOOM_1X;
+            gen_cmd.payload[0] = start_flag;
+            rc = mm_channel_proc_general_cmd(my_obj, &gen_cmd);
+        }
+        break;
+    case MM_CAMERA_EVT_CAPTURE_SETTING:
+        {
+            mm_camera_generic_cmd_t gen_cmd;
+            cam_capture_frame_config_t *input;
+            uint32_t i = 0;
+            gen_cmd.type = MM_CAMERA_GENERIC_CMD_TYPE_CAPTURE_SETTING;
+            CDBG_HIGH("MM_CAMERA_EVT_CAPTURE_SETTING");
+            if (in_val == NULL) {
+                gen_cmd.payload[0] = 0;
+                memset(&gen_cmd.frame_config, 0, sizeof(cam_capture_frame_config_t));
+            } else {
+                gen_cmd.payload[0] = 1;
+                input = (cam_capture_frame_config_t *)in_val;
+                gen_cmd.frame_config = *input;
+            }
+            rc = mm_channel_proc_general_cmd(my_obj, &gen_cmd);
+        }
+        break;
+     default:
+        CDBG_ERROR("%s: invalid state (%d) for evt (%d), in(%p), out(%p)",
+                   __func__, my_obj->state, evt, in_val, out_val);
+        break;
+    }
+    CDBG("%s : X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_fsm_fn_paused
+ *
+ * DESCRIPTION: channel finite state machine function to handle event
+ *              in PAUSED state.
+ *
+ * PARAMETERS :
+ *   @my_obj   : ptr to a channel object
+ *   @evt      : channel event to be processed
+ *   @in_val   : input event payload. Can be NULL if not needed.
+ *   @out_val  : output payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_fsm_fn_paused(mm_channel_t *my_obj,
+                          mm_channel_evt_type_t evt,
+                          void * in_val,
+                          void * out_val)
+{
+    int32_t rc = 0;
+
+    /* currently we do not support pause/resume of a channel */
+    CDBG_ERROR("%s: invalid state (%d) for evt (%d), in(%p), out(%p)",
+               __func__, my_obj->state, evt, in_val, out_val);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_init
+ *
+ * DESCRIPTION: initialize a channel
+ *
+ * PARAMETERS :
+ *   @my_obj       : channel object be to initialized
+ *   @attr         : bundle attribute of the channel if needed
+ *   @channel_cb   : callback function for bundle data notify
+ *   @userdata     : user data ptr
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : if no bundle data notify is needed, meaning each stream in the
+ *              channel will have its own stream data notify callback, then
+ *              attr, channel_cb, and userdata can be NULL. In this case,
+ *              no frame matching logic will be performed in the channel for bundling.
+ *==========================================================================*/
+int32_t mm_channel_init(mm_channel_t *my_obj,
+                        mm_camera_channel_attr_t *attr,
+                        mm_camera_buf_notify_t channel_cb,
+                        void *userdata)
+{
+    int32_t rc = 0;
+
+    my_obj->bundle.super_buf_notify_cb = channel_cb;
+    my_obj->bundle.user_data = userdata;
+    if (NULL != attr) {
+        my_obj->bundle.superbuf_queue.attr = *attr;
+    }
+
+    CDBG("%s : Launch data poll thread in channel open", __func__);
+    snprintf(my_obj->threadName, THREAD_NAME_SIZE, "DataPoll");
+    mm_camera_poll_thread_launch(&my_obj->poll_thread[0],
+                                 MM_CAMERA_POLL_TYPE_DATA);
+
+    /* change state to stopped state */
+    my_obj->state = MM_CHANNEL_STATE_STOPPED;
+    return rc;
+}
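+
+/*
+ * Editorial sketch (assumed caller, not part of this change): per the NOTE
+ * above, a bundled channel is initialized with a superbuf callback, while a
+ * channel using per-stream callbacks passes NULLs, e.g.
+ *
+ *     mm_channel_init(ch, &bundle_attr, super_buf_cb, user_data);  // bundled
+ *     mm_channel_init(ch, NULL, NULL, NULL);            // per-stream data CBs
+ */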
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_release
+ *
+ * DESCRIPTION: release a channel resource. Channel state will move to UNUSED
+ *              state after this call.
+ *
+ * PARAMETERS :
+ *   @my_obj       : channel object
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void mm_channel_release(mm_channel_t *my_obj)
+{
+    /* stop data poll thread */
+    mm_camera_poll_thread_release(&my_obj->poll_thread[0]);
+
+    /* change state to notused state */
+    my_obj->state = MM_CHANNEL_STATE_NOTUSED;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_link_stream
+ *
+ * DESCRIPTION: link a stream from external channel into this channel
+ *
+ * PARAMETERS :
+ *   @my_obj  : channel object
+ *   @stream_link  : channel and stream to be linked
+ *
+ * RETURN     : uint32_t type of stream handle
+ *              0  -- invalid stream handle, meaning the op failed
+ *              >0 -- successfully added a stream with a valid handle
+ *==========================================================================*/
+uint32_t mm_channel_link_stream(mm_channel_t *my_obj,
+        mm_camera_stream_link_t *stream_link)
+{
+    uint8_t idx = 0;
+    uint32_t s_hdl = 0;
+    mm_stream_t *stream_obj = NULL;
+    mm_stream_t *stream = NULL;
+
+    if (NULL == stream_link) {
+        CDBG_ERROR("%s : Invalid stream link", __func__);
+        return 0;
+    }
+
+    stream = mm_channel_util_get_stream_by_handler(stream_link->ch,
+            stream_link->stream_id);
+    if (NULL == stream) {
+        return 0;
+    }
+
+    /* check available stream */
+    for (idx = 0; idx < MAX_STREAM_NUM_IN_BUNDLE; idx++) {
+        if (MM_STREAM_STATE_NOTUSED == my_obj->streams[idx].state) {
+            stream_obj = &my_obj->streams[idx];
+            break;
+        }
+    }
+    if (NULL == stream_obj) {
+        CDBG_ERROR("%s: streams reach max, no more stream allowed to add",
+                __func__);
+        return s_hdl;
+    }
+
+    /* initialize stream object */
+    *stream_obj = *stream;
+    stream_obj->linked_stream = stream;
+    s_hdl = stream->my_hdl;
+
+    CDBG("%s : stream handle = %d", __func__, s_hdl);
+    return s_hdl;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_add_stream
+ *
+ * DESCRIPTION: add a stream into the channel
+ *
+ * PARAMETERS :
+ *   @my_obj       : channel object
+ *
+ * RETURN     : uint32_t type of stream handle
+ *              0  -- invalid stream handle, meaning the op failed
+ *              >0 -- successfully added a stream with a valid handle
+ *==========================================================================*/
+uint32_t mm_channel_add_stream(mm_channel_t *my_obj)
+{
+    int32_t rc = 0;
+    uint8_t idx = 0;
+    uint32_t s_hdl = 0;
+    mm_stream_t *stream_obj = NULL;
+
+    CDBG("%s : E", __func__);
+    /* check available stream */
+    for (idx = 0; idx < MAX_STREAM_NUM_IN_BUNDLE; idx++) {
+        if (MM_STREAM_STATE_NOTUSED == my_obj->streams[idx].state) {
+            stream_obj = &my_obj->streams[idx];
+            break;
+        }
+    }
+    if (NULL == stream_obj) {
+        CDBG_ERROR("%s: streams reach max, no more stream allowed to add", __func__);
+        return s_hdl;
+    }
+
+    /* initialize stream object */
+    memset(stream_obj, 0, sizeof(mm_stream_t));
+    stream_obj->fd = -1;
+    stream_obj->my_hdl = mm_camera_util_generate_handler(idx);
+    stream_obj->ch_obj = my_obj;
+    pthread_mutex_init(&stream_obj->buf_lock, NULL);
+    pthread_mutex_init(&stream_obj->cb_lock, NULL);
+    stream_obj->state = MM_STREAM_STATE_INITED;
+
+    /* acquire stream */
+    rc = mm_stream_fsm_fn(stream_obj, MM_STREAM_EVT_ACQUIRE, NULL, NULL);
+    if (0 == rc) {
+        s_hdl = stream_obj->my_hdl;
+    } else {
+        /* error during acquire, de-init */
+        pthread_mutex_destroy(&stream_obj->buf_lock);
+        pthread_mutex_destroy(&stream_obj->cb_lock);
+        memset(stream_obj, 0, sizeof(mm_stream_t));
+    }
+    CDBG("%s : stream handle = %d", __func__, s_hdl);
+    return s_hdl;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_del_stream
+ *
+ * DESCRIPTION: delete a stream from the channel by its handle
+ *
+ * PARAMETERS :
+ *   @my_obj       : channel object
+ *   @stream_id    : stream handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : assume the stream is stopped before it can be deleted
+ *==========================================================================*/
+int32_t mm_channel_del_stream(mm_channel_t *my_obj,
+                              uint32_t stream_id)
+{
+    int rc = -1;
+    mm_stream_t * stream_obj = NULL;
+    stream_obj = mm_channel_util_get_stream_by_handler(my_obj, stream_id);
+
+    if (NULL == stream_obj) {
+        CDBG_ERROR("%s :Invalid Stream Object for stream_id = %d",
+                   __func__, stream_id);
+        return rc;
+    }
+
+    if (stream_obj->ch_obj != my_obj) {
+        /* Only unlink stream */
+        pthread_mutex_lock(&stream_obj->linked_stream->buf_lock);
+        stream_obj->linked_stream->is_linked = 0;
+        stream_obj->linked_stream->linked_obj = NULL;
+        pthread_mutex_unlock(&stream_obj->linked_stream->buf_lock);
+        memset(stream_obj, 0, sizeof(mm_stream_t));
+
+        return 0;
+    }
+
+    rc = mm_stream_fsm_fn(stream_obj,
+                          MM_STREAM_EVT_RELEASE,
+                          NULL,
+                          NULL);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_config_stream
+ *
+ * DESCRIPTION: configure a stream
+ *
+ * PARAMETERS :
+ *   @my_obj       : channel object
+ *   @stream_id    : stream handle
+ *   @config       : stream configuration
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_config_stream(mm_channel_t *my_obj,
+                                   uint32_t stream_id,
+                                   mm_camera_stream_config_t *config)
+{
+    int rc = -1;
+    mm_stream_t * stream_obj = NULL;
+    CDBG("%s : E stream ID = %d", __func__, stream_id);
+    stream_obj = mm_channel_util_get_stream_by_handler(my_obj, stream_id);
+
+    if (NULL == stream_obj) {
+        CDBG_ERROR("%s :Invalid Stream Object for stream_id = %d", __func__, stream_id);
+        return rc;
+    }
+
+    if (stream_obj->ch_obj != my_obj) {
+        /* No op. on linked streams */
+        return 0;
+    }
+
+    /* set stream fmt */
+    rc = mm_stream_fsm_fn(stream_obj,
+                          MM_STREAM_EVT_SET_FMT,
+                          (void *)config,
+                          NULL);
+    CDBG("%s : X rc = %d",__func__,rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_get_bundle_info
+ *
+ * DESCRIPTION: query bundle info of the channel, which should include all
+ *              streams within this channel
+ *
+ * PARAMETERS :
+ *   @my_obj       : channel object
+ *   @bundle_info  : bundle info to be filled in
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_get_bundle_info(mm_channel_t *my_obj,
+                                   cam_bundle_config_t *bundle_info)
+{
+    int i;
+    mm_stream_t *s_obj = NULL;
+    cam_stream_type_t stream_type = CAM_STREAM_TYPE_DEFAULT;
+    int32_t rc = 0;
+
+    memset(bundle_info, 0, sizeof(cam_bundle_config_t));
+    bundle_info->bundle_id = my_obj->my_hdl;
+    bundle_info->num_of_streams = 0;
+    for (i = 0; i < MAX_STREAM_NUM_IN_BUNDLE; i++) {
+        if (my_obj->streams[i].my_hdl > 0) {
+            s_obj = mm_channel_util_get_stream_by_handler(my_obj,
+                                                          my_obj->streams[i].my_hdl);
+            if (NULL != s_obj) {
+                stream_type = s_obj->stream_info->stream_type;
+                if ((CAM_STREAM_TYPE_METADATA != stream_type) &&
+                        (s_obj->ch_obj == my_obj)) {
+                    bundle_info->stream_ids[bundle_info->num_of_streams++] =
+                                                        s_obj->server_stream_id;
+                }
+            } else {
+                CDBG_ERROR("%s: cannot find stream obj (%d) by handler (%d)",
+                           __func__, i, my_obj->streams[i].my_hdl);
+                rc = -1;
+                break;
+            }
+        }
+    }
+    if (rc != 0) {
+        /* error, reset to 0 */
+        memset(bundle_info, 0, sizeof(cam_bundle_config_t));
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_start
+ *
+ * DESCRIPTION: start a channel, which will start all streams in the channel
+ *
+ * PARAMETERS :
+ *   @my_obj       : channel object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_start(mm_channel_t *my_obj)
+{
+    int32_t rc = 0;
+    int i = 0, j = 0;
+    mm_stream_t *s_objs[MAX_STREAM_NUM_IN_BUNDLE] = {NULL};
+    uint8_t num_streams_to_start = 0;
+    uint8_t num_streams_in_bundle_queue = 0;
+    mm_stream_t *s_obj = NULL;
+    int meta_stream_idx = 0;
+    cam_stream_type_t stream_type = CAM_STREAM_TYPE_DEFAULT;
+
+    for (i = 0; i < MAX_STREAM_NUM_IN_BUNDLE; i++) {
+        if (my_obj->streams[i].my_hdl > 0) {
+            s_obj = mm_channel_util_get_stream_by_handler(my_obj,
+                                                          my_obj->streams[i].my_hdl);
+            if (NULL != s_obj) {
+                stream_type = s_obj->stream_info->stream_type;
+                /* remember meta data stream index */
+                if ((stream_type == CAM_STREAM_TYPE_METADATA) &&
+                        (s_obj->ch_obj == my_obj)) {
+                    meta_stream_idx = num_streams_to_start;
+                }
+                s_objs[num_streams_to_start++] = s_obj;
+
+                if (!s_obj->stream_info->noFrameExpected) {
+                    num_streams_in_bundle_queue++;
+                }
+            }
+        }
+    }
+
+    if (meta_stream_idx > 0 ) {
+        /* always start meta data stream first, so switch the stream object with the first one */
+        s_obj = s_objs[0];
+        s_objs[0] = s_objs[meta_stream_idx];
+        s_objs[meta_stream_idx] = s_obj;
+    }
+
+    if (NULL != my_obj->bundle.super_buf_notify_cb) {
+        /* need to send up cb, therefore launch thread */
+        /* init superbuf queue */
+        mm_channel_superbuf_queue_init(&my_obj->bundle.superbuf_queue);
+        my_obj->bundle.superbuf_queue.num_streams = num_streams_in_bundle_queue;
+        my_obj->bundle.superbuf_queue.expected_frame_id = 0;
+        my_obj->bundle.superbuf_queue.expected_frame_id_without_led = 0;
+        my_obj->bundle.superbuf_queue.led_off_start_frame_id = 0;
+        my_obj->bundle.superbuf_queue.led_on_start_frame_id = 0;
+        my_obj->bundle.superbuf_queue.led_on_num_frames = 0;
+
+        for (i = 0; i < num_streams_to_start; i++) {
+            /* Only bundle streams that belong to the channel */
+            if(!(s_objs[i]->stream_info->noFrameExpected)) {
+                if (s_objs[i]->ch_obj == my_obj) {
+                    /* set bundled flag to streams */
+                    s_objs[i]->is_bundled = 1;
+                }
+                my_obj->bundle.superbuf_queue.bundled_streams[j++] = s_objs[i]->my_hdl;
+            }
+        }
+
+        /* launch cb thread for dispatching super buf through cb */
+        snprintf(my_obj->cb_thread.threadName, THREAD_NAME_SIZE, "CAM_SuperBuf");
+        mm_camera_cmd_thread_launch(&my_obj->cb_thread,
+                                    mm_channel_dispatch_super_buf,
+                                    (void*)my_obj);
+
+        /* launch cmd thread for super buf dataCB */
+        snprintf(my_obj->cmd_thread.threadName, THREAD_NAME_SIZE, "CAM_SuperBufCB");
+        mm_camera_cmd_thread_launch(&my_obj->cmd_thread,
+                                    mm_channel_process_stream_buf,
+                                    (void*)my_obj);
+
+        /* set flag to TRUE */
+        my_obj->bundle.is_active = TRUE;
+    }
+
+    /* link any streams first before starting the rest of the streams */
+    for (i = 0; i < num_streams_to_start; i++) {
+        if (s_objs[i]->ch_obj != my_obj) {
+            pthread_mutex_lock(&s_objs[i]->linked_stream->buf_lock);
+            s_objs[i]->linked_stream->linked_obj = my_obj;
+            s_objs[i]->linked_stream->is_linked = 1;
+            pthread_mutex_unlock(&s_objs[i]->linked_stream->buf_lock);
+            continue;
+        }
+    }
+
+    for (i = 0; i < num_streams_to_start; i++) {
+        if (s_objs[i]->ch_obj != my_obj) {
+            continue;
+        }
+        /* all streams within a channel should be started at the same time */
+        if (s_objs[i]->state == MM_STREAM_STATE_ACTIVE) {
+            CDBG_ERROR("%s: stream already started idx(%d)", __func__, i);
+            rc = -1;
+            break;
+        }
+
+        /* allocate buf */
+        rc = mm_stream_fsm_fn(s_objs[i],
+                              MM_STREAM_EVT_GET_BUF,
+                              NULL,
+                              NULL);
+        if (0 != rc) {
+            CDBG_ERROR("%s: get buf failed at idx(%d)", __func__, i);
+            break;
+        }
+
+        /* reg buf */
+        rc = mm_stream_fsm_fn(s_objs[i],
+                              MM_STREAM_EVT_REG_BUF,
+                              NULL,
+                              NULL);
+        if (0 != rc) {
+            CDBG_ERROR("%s: reg buf failed at idx(%d)", __func__, i);
+            break;
+        }
+
+        /* start stream */
+        rc = mm_stream_fsm_fn(s_objs[i],
+                              MM_STREAM_EVT_START,
+                              NULL,
+                              NULL);
+        if (0 != rc) {
+            CDBG_ERROR("%s: start stream failed at idx(%d)", __func__, i);
+            break;
+        }
+    }
+
+    /* error handling */
+    if (0 != rc) {
+        /* unlink the streams first */
+        for (j = 0; j < num_streams_to_start; j++) {
+            if (s_objs[j]->ch_obj != my_obj) {
+                pthread_mutex_lock(&s_objs[j]->linked_stream->buf_lock);
+                s_objs[j]->linked_stream->is_linked = 0;
+                s_objs[j]->linked_stream->linked_obj = NULL;
+                pthread_mutex_unlock(&s_objs[j]->linked_stream->buf_lock);
+
+                if (TRUE == my_obj->bundle.is_active) {
+                    mm_channel_superbuf_flush(my_obj,
+                            &my_obj->bundle.superbuf_queue,
+                            s_objs[j]->stream_info->stream_type);
+                }
+                memset(s_objs[j], 0, sizeof(mm_stream_t));
+                continue;
+            }
+        }
+
+        for (j = 0; j <= i; j++) {
+            if ((NULL == s_objs[j]) || (s_objs[j]->ch_obj != my_obj)) {
+                continue;
+            }
+            /* stop streams*/
+            mm_stream_fsm_fn(s_objs[j],
+                             MM_STREAM_EVT_STOP,
+                             NULL,
+                             NULL);
+
+            /* unreg buf */
+            mm_stream_fsm_fn(s_objs[j],
+                             MM_STREAM_EVT_UNREG_BUF,
+                             NULL,
+                             NULL);
+
+            /* put buf back */
+            mm_stream_fsm_fn(s_objs[j],
+                             MM_STREAM_EVT_PUT_BUF,
+                             NULL,
+                             NULL);
+        }
+
+        /* destroy super buf cmd thread */
+        if (TRUE == my_obj->bundle.is_active) {
+            /* first stop bundle thread */
+            mm_camera_cmd_thread_release(&my_obj->cmd_thread);
+            mm_camera_cmd_thread_release(&my_obj->cb_thread);
+
+            /* deinit superbuf queue */
+            mm_channel_superbuf_queue_deinit(&my_obj->bundle.superbuf_queue);
+
+            /* memset bundle info */
+            memset(&my_obj->bundle, 0, sizeof(mm_channel_bundle_t));
+        }
+    }
+    my_obj->bWaitForPrepSnapshotDone = 0;
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_stop
+ *
+ * DESCRIPTION: stop a channel, which will stop all streams in the channel
+ *
+ * PARAMETERS :
+ *   @my_obj       : channel object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_stop(mm_channel_t *my_obj)
+{
+    int32_t rc = 0;
+    int i;
+    mm_stream_t *s_objs[MAX_STREAM_NUM_IN_BUNDLE] = {NULL};
+    uint8_t num_streams_to_stop = 0;
+    mm_stream_t *s_obj = NULL;
+    int meta_stream_idx = 0;
+    cam_stream_type_t stream_type = CAM_STREAM_TYPE_DEFAULT;
+
+    for (i = 0; i < MAX_STREAM_NUM_IN_BUNDLE; i++) {
+        if (my_obj->streams[i].my_hdl > 0) {
+            s_obj = mm_channel_util_get_stream_by_handler(my_obj,
+                                                          my_obj->streams[i].my_hdl);
+            if (NULL != s_obj) {
+                if (s_obj->ch_obj == my_obj) {
+                    stream_type = s_obj->stream_info->stream_type;
+                    /* remember meta data stream index */
+                    if (stream_type == CAM_STREAM_TYPE_METADATA) {
+                        meta_stream_idx = num_streams_to_stop;
+                    }
+                }
+                s_objs[num_streams_to_stop++] = s_obj;
+            }
+        }
+    }
+
+    if (meta_stream_idx < num_streams_to_stop - 1 ) {
+        /* always stop meta data stream last, so switch the stream object with the last one */
+        s_obj = s_objs[num_streams_to_stop - 1];
+        s_objs[num_streams_to_stop - 1] = s_objs[meta_stream_idx];
+        s_objs[meta_stream_idx] = s_obj;
+    }
+
+    for (i = 0; i < num_streams_to_stop; i++) {
+        /* streams that are linked to this channel should not be stopped */
+        if (s_objs[i]->ch_obj != my_obj) {
+            continue;
+        }
+
+        /* stream off */
+        mm_stream_fsm_fn(s_objs[i],
+                         MM_STREAM_EVT_STOP,
+                         NULL,
+                         NULL);
+
+        /* unreg buf at kernel */
+        mm_stream_fsm_fn(s_objs[i],
+                         MM_STREAM_EVT_UNREG_BUF,
+                         NULL,
+                         NULL);
+    }
+
+    for (i = 0; i < num_streams_to_stop; i++) {
+        if (s_objs[i]->ch_obj != my_obj) {
+            /* Only unlink stream */
+            pthread_mutex_lock(&s_objs[i]->linked_stream->buf_lock);
+            s_objs[i]->linked_stream->is_linked = 0;
+            s_objs[i]->linked_stream->linked_obj = NULL;
+            pthread_mutex_unlock(&s_objs[i]->linked_stream->buf_lock);
+
+            if (TRUE == my_obj->bundle.is_active) {
+                mm_channel_flush_super_buf_queue(my_obj, 0);
+            }
+            break;
+        } else {
+            continue;
+        }
+    }
+
+    /* destroy super buf cmd thread */
+    if (TRUE == my_obj->bundle.is_active) {
+        /* first stop bundle thread */
+        mm_camera_cmd_thread_release(&my_obj->cmd_thread);
+        mm_camera_cmd_thread_release(&my_obj->cb_thread);
+
+        /* deinit superbuf queue */
+        mm_channel_superbuf_queue_deinit(&my_obj->bundle.superbuf_queue);
+    }
+
+    /* since all streams are stopped, we are safe to
+     * release all buffers allocated in stream */
+    for (i = 0; i < num_streams_to_stop; i++) {
+        if (s_objs[i]->ch_obj != my_obj) {
+            continue;
+        }
+        /* put buf back */
+        mm_stream_fsm_fn(s_objs[i],
+                         MM_STREAM_EVT_PUT_BUF,
+                         NULL,
+                         NULL);
+    }
+
+    for (i = 0; i < num_streams_to_stop; i++) {
+        if (s_objs[i]->ch_obj != my_obj) {
+            memset(s_objs[i], 0, sizeof(mm_stream_t));
+        } else {
+            continue;
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_request_super_buf
+ *
+ * DESCRIPTION: for burst mode in bundle, request a certain amount of matched
+ *              frames from the superbuf queue
+ *
+ * PARAMETERS :
+ *   @my_obj       : channel object
+ *   @num_buf_requested : number of matched frames needed
+ *   @num_retro_buf_requested : number of retro frames needed
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_request_super_buf(mm_channel_t *my_obj,
+               uint32_t num_buf_requested, uint32_t num_retro_buf_requested)
+{
+    int32_t rc = 0;
+    mm_camera_cmdcb_t* node = NULL;
+
+    /* set pending_cnt
+     * will trigger dispatching super frames if pending_cnt > 0 */
+    /* send cam_sem_post to wake up cmd thread to dispatch super buffer */
+    node = (mm_camera_cmdcb_t *)malloc(sizeof(mm_camera_cmdcb_t));
+    if (NULL != node) {
+        memset(node, 0, sizeof(mm_camera_cmdcb_t));
+        node->cmd_type = MM_CAMERA_CMD_TYPE_REQ_DATA_CB;
+        node->u.req_buf.num_buf_requested = num_buf_requested;
+        node->u.req_buf.num_retro_buf_requested = num_retro_buf_requested;
+
+        /* enqueue to cmd thread */
+        cam_queue_enq(&(my_obj->cmd_thread.cmd_queue), node);
+
+        /* wake up cmd thread */
+        cam_sem_post(&(my_obj->cmd_thread.cmd_sem));
+    } else {
+        CDBG_ERROR("%s: No memory for mm_camera_node_t", __func__);
+        rc = -1;
+    }
+
+    return rc;
+}
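+
+/*
+ * Editorial note: the request is serviced asynchronously. As with the flush,
+ * notify-mode and ZSL start/stop helpers below, this function only packages a
+ * mm_camera_cmdcb_t, enqueues it on cmd_thread.cmd_queue and posts cmd_sem;
+ * the cmd thread then updates pending_cnt/pending_retro_cnt, which drives the
+ * superbuf dispatch loop above.
+ */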
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_cancel_super_buf_request
+ *
+ * DESCRIPTION: for burst mode in bundle, cancel the request for a certain
+ *              amount of matched frames from the superbuf queue
+ *
+ * PARAMETERS :
+ *   @my_obj       : channel object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_cancel_super_buf_request(mm_channel_t *my_obj)
+{
+    int32_t rc = 0;
+    /* reset pending_cnt */
+    rc = mm_channel_request_super_buf(my_obj, 0, 0);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_flush_super_buf_queue
+ *
+ * DESCRIPTION: flush superbuf queue
+ *
+ * PARAMETERS :
+ *   @my_obj  : channel object
+ *   @frame_idx : frame idx until which to flush all superbufs
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_flush_super_buf_queue(mm_channel_t *my_obj, uint32_t frame_idx)
+{
+    int32_t rc = 0;
+    mm_camera_cmdcb_t* node = NULL;
+
+    node = (mm_camera_cmdcb_t *)malloc(sizeof(mm_camera_cmdcb_t));
+    if (NULL != node) {
+        memset(node, 0, sizeof(mm_camera_cmdcb_t));
+        node->cmd_type = MM_CAMERA_CMD_TYPE_FLUSH_QUEUE;
+        node->u.frame_idx = frame_idx;
+
+        /* enqueue to cmd thread */
+        cam_queue_enq(&(my_obj->cmd_thread.cmd_queue), node);
+
+        /* wake up cmd thread */
+        cam_sem_post(&(my_obj->cmd_thread.cmd_sem));
+    } else {
+        CDBG_ERROR("%s: No memory for mm_camera_node_t", __func__);
+        rc = -1;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_config_notify_mode
+ *
+ * DESCRIPTION: configure notification mode
+ *
+ * PARAMETERS :
+ *   @my_obj  : channel object
+ *   @notify_mode : notification mode
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_config_notify_mode(mm_channel_t *my_obj,
+                                      mm_camera_super_buf_notify_mode_t notify_mode)
+{
+    int32_t rc = 0;
+    mm_camera_cmdcb_t* node = NULL;
+
+    node = (mm_camera_cmdcb_t *)malloc(sizeof(mm_camera_cmdcb_t));
+    if (NULL != node) {
+        memset(node, 0, sizeof(mm_camera_cmdcb_t));
+        node->u.notify_mode = notify_mode;
+        node->cmd_type = MM_CAMERA_CMD_TYPE_CONFIG_NOTIFY;
+
+        /* enqueue to cmd thread */
+        cam_queue_enq(&(my_obj->cmd_thread.cmd_queue), node);
+
+        /* wake up cmd thread */
+        cam_sem_post(&(my_obj->cmd_thread.cmd_sem));
+    } else {
+        CDBG_ERROR("%s: No memory for mm_camera_node_t", __func__);
+        rc = -1;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_start_zsl_snapshot
+ *
+ * DESCRIPTION: start zsl snapshot
+ *
+ * PARAMETERS :
+ *   @my_obj  : channel object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_start_zsl_snapshot(mm_channel_t *my_obj)
+{
+    int32_t rc = 0;
+    mm_camera_cmdcb_t* node = NULL;
+
+    node = (mm_camera_cmdcb_t *)malloc(sizeof(mm_camera_cmdcb_t));
+    if (NULL != node) {
+        memset(node, 0, sizeof(mm_camera_cmdcb_t));
+        node->cmd_type = MM_CAMERA_CMD_TYPE_START_ZSL;
+
+        /* enqueue to cmd thread */
+        cam_queue_enq(&(my_obj->cmd_thread.cmd_queue), node);
+
+        /* wake up cmd thread */
+        cam_sem_post(&(my_obj->cmd_thread.cmd_sem));
+    } else {
+        CDBG_ERROR("%s: No memory for mm_camera_node_t", __func__);
+        rc = -1;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_stop_zsl_snapshot
+ *
+ * DESCRIPTION: stop zsl snapshot
+ *
+ * PARAMETERS :
+ *   @my_obj  : channel object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_stop_zsl_snapshot(mm_channel_t *my_obj)
+{
+    int32_t rc = 0;
+    mm_camera_cmdcb_t* node = NULL;
+
+    node = (mm_camera_cmdcb_t *)malloc(sizeof(mm_camera_cmdcb_t));
+    if (NULL != node) {
+        memset(node, 0, sizeof(mm_camera_cmdcb_t));
+        node->cmd_type = MM_CAMERA_CMD_TYPE_STOP_ZSL;
+
+        /* enqueue to cmd thread */
+        cam_queue_enq(&(my_obj->cmd_thread.cmd_queue), node);
+
+        /* wake up cmd thread */
+        cam_sem_post(&(my_obj->cmd_thread.cmd_sem));
+    } else {
+        CDBG_ERROR("%s: No memory for mm_camera_node_t", __func__);
+        rc = -1;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_qbuf
+ *
+ * DESCRIPTION: enqueue buffer back to kernel
+ *
+ * PARAMETERS :
+ *   @my_obj       : channel object
+ *   @buf          : buf ptr to be enqueued
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_qbuf(mm_channel_t *my_obj,
+                        mm_camera_buf_def_t *buf)
+{
+    int32_t rc = -1;
+    mm_stream_t* s_obj = mm_channel_util_get_stream_by_handler(my_obj, buf->stream_id);
+
+    if (NULL != s_obj) {
+        if (s_obj->ch_obj != my_obj) {
+            /* Redirect to linked stream */
+            rc = mm_stream_fsm_fn(s_obj->linked_stream,
+                    MM_STREAM_EVT_QBUF,
+                    (void *)buf,
+                    NULL);
+        } else {
+            rc = mm_stream_fsm_fn(s_obj,
+                    MM_STREAM_EVT_QBUF,
+                    (void *)buf,
+                    NULL);
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_get_queued_buf_count
+ *
+ * DESCRIPTION: return queued buffer count
+ *
+ * PARAMETERS :
+ *   @my_obj       : channel object
+ *   @stream_id    : stream id
+ *
+ * RETURN     : queued buffer count
+ *==========================================================================*/
+int32_t mm_channel_get_queued_buf_count(mm_channel_t *my_obj, uint32_t stream_id)
+{
+    int32_t rc = -1;
+    mm_stream_t* s_obj = mm_channel_util_get_stream_by_handler(my_obj, stream_id);
+
+    if (NULL != s_obj) {
+        if (s_obj->ch_obj != my_obj) {
+            /* Redirect to linked stream */
+            rc = mm_stream_fsm_fn(s_obj->linked_stream,
+                    MM_STREAM_EVT_GET_QUEUED_BUF_COUNT,
+                    NULL,
+                    NULL);
+        } else {
+            rc = mm_stream_fsm_fn(s_obj,
+                    MM_STREAM_EVT_GET_QUEUED_BUF_COUNT,
+                    NULL,
+                    NULL);
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_set_stream_parm
+ *
+ * DESCRIPTION: set parameters per stream
+ *
+ * PARAMETERS :
+ *   @my_obj       : channel object
+ *   @s_id         : stream handle
+ *   @parms        : ptr to a param struct to be set to server
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : Assume the parms struct buf is already mapped to server via
+ *              domain socket. Corresponding fields of parameters to be set
+ *              are already filled in by upper layer caller.
+ *==========================================================================*/
+int32_t mm_channel_set_stream_parm(mm_channel_t *my_obj,
+                                   mm_evt_paylod_set_get_stream_parms_t *payload)
+{
+    int32_t rc = -1;
+    mm_stream_t* s_obj = mm_channel_util_get_stream_by_handler(my_obj,
+                                                               payload->stream_id);
+    if (NULL != s_obj) {
+        if (s_obj->ch_obj != my_obj) {
+            /* No op. on linked streams */
+            return 0;
+        }
+
+        rc = mm_stream_fsm_fn(s_obj,
+                              MM_STREAM_EVT_SET_PARM,
+                              (void *)payload,
+                              NULL);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_get_stream_parm
+ *
+ * DESCRIPTION: get parameters per stream
+ *
+ * PARAMETERS :
+ *   @my_obj       : channel object
+ *   @s_id         : stream handle
+ *   @parms        : ptr to a param struct to be get from server
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : Assume the parms struct buf is already mapped to server via
+ *              domain socket. Parameters to be queried from the server are already
+ *              filled in by upper layer caller. After this call, corresponding
+ *              fields of requested parameters will be filled in by server with
+ *              detailed information.
+ *==========================================================================*/
+int32_t mm_channel_get_stream_parm(mm_channel_t *my_obj,
+                                   mm_evt_paylod_set_get_stream_parms_t *payload)
+{
+    int32_t rc = -1;
+    mm_stream_t* s_obj = mm_channel_util_get_stream_by_handler(my_obj,
+                                                               payload->stream_id);
+    if (NULL != s_obj) {
+        if (s_obj->ch_obj != my_obj) {
+            /* No op. on linked streams */
+            return 0;
+        }
+
+        rc = mm_stream_fsm_fn(s_obj,
+                              MM_STREAM_EVT_GET_PARM,
+                              (void *)payload,
+                              NULL);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_do_stream_action
+ *
+ * DESCRIPTION: request server to perform a stream-based action. May be removed
+ *              later if the functionality is included in mm_camera_set_parms
+ *
+ * PARAMETERS :
+ *   @my_obj       : channel object
+ *   @s_id         : stream handle
+ *   @actions      : ptr to an action struct buf to be performed by server
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : Assume the action struct buf is already mapped to server via
+ *              domain socket. Actions to be performed by server are already
+ *              filled in by upper layer caller.
+ *==========================================================================*/
+int32_t mm_channel_do_stream_action(mm_channel_t *my_obj,
+                                   mm_evt_paylod_do_stream_action_t *payload)
+{
+    int32_t rc = -1;
+    mm_stream_t* s_obj = mm_channel_util_get_stream_by_handler(my_obj,
+                                                               payload->stream_id);
+    if (NULL != s_obj) {
+        if (s_obj->ch_obj != my_obj) {
+            /* No op. on linked streams */
+            return 0;
+        }
+
+        rc = mm_stream_fsm_fn(s_obj,
+                              MM_STREAM_EVT_DO_ACTION,
+                              (void *)payload,
+                              NULL);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_map_stream_buf
+ *
+ * DESCRIPTION: mapping stream buffer via domain socket to server
+ *
+ * PARAMETERS :
+ *   @my_obj       : channel object
+ *   @payload      : ptr to payload for mapping
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_map_stream_buf(mm_channel_t *my_obj,
+                                  mm_evt_paylod_map_stream_buf_t *payload)
+{
+    int32_t rc = -1;
+    mm_stream_t* s_obj = mm_channel_util_get_stream_by_handler(my_obj,
+                                                               payload->stream_id);
+    if (NULL != s_obj) {
+        if (s_obj->ch_obj != my_obj) {
+            /* No op. on linked streams */
+            return 0;
+        }
+
+        rc = mm_stream_map_buf(s_obj,
+                               payload->buf_type,
+                               payload->buf_idx,
+                               payload->plane_idx,
+                               payload->fd,
+                               payload->size);
+    }
+
+    return rc;
+}
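+
+/*
+ * Editorial sketch (illustrative only; caller-side variable names are assumed):
+ * the payload mirrors the mm_stream_map_buf() arguments used above, so a caller
+ * would fill it roughly as
+ *
+ *     mm_evt_paylod_map_stream_buf_t payload;
+ *     memset(&payload, 0, sizeof(payload));
+ *     payload.stream_id = stream_id;
+ *     payload.buf_type  = CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF;
+ *     payload.buf_idx   = buf_idx;
+ *     payload.plane_idx = plane_idx;
+ *     payload.fd        = buf_fd;
+ *     payload.size      = buf_len;
+ *
+ * and route it through the FSM as MM_CHANNEL_EVT_MAP_STREAM_BUF (in the ACTIVE
+ * state only offline input/meta buffers are accepted, per the handler above).
+ */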
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_unmap_stream_buf
+ *
+ * DESCRIPTION: unmapping stream buffer via domain socket to server
+ *
+ * PARAMETERS :
+ *   @my_obj       : channel object
+ *   @payload      : ptr to unmap payload
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_unmap_stream_buf(mm_channel_t *my_obj,
+                                    mm_evt_paylod_unmap_stream_buf_t *payload)
+{
+    int32_t rc = -1;
+    mm_stream_t* s_obj = mm_channel_util_get_stream_by_handler(my_obj,
+                                                               payload->stream_id);
+    if (NULL != s_obj) {
+        if (s_obj->ch_obj != my_obj) {
+            /* No op. on linked streams */
+            return 0;
+        }
+
+        rc = mm_stream_unmap_buf(s_obj, payload->buf_type,
+                                 payload->buf_idx, payload->plane_idx);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_superbuf_queue_init
+ *
+ * DESCRIPTION: initialize superbuf queue in the channel
+ *
+ * PARAMETERS :
+ *   @queue   : ptr to superbuf queue to be initialized
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_superbuf_queue_init(mm_channel_queue_t * queue)
+{
+    return cam_queue_init(&queue->que);
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_superbuf_queue_deinit
+ *
+ * DESCRIPTION: deinitialize superbuf queue in the channel
+ *
+ * PARAMETERS :
+ *   @queue   : ptr to superbuf queue to be deinitialized
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_superbuf_queue_deinit(mm_channel_queue_t * queue)
+{
+    return cam_queue_deinit(&queue->que);
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_util_seq_comp_w_rollover
+ *
+ * DESCRIPTION: utility function to handle sequence number comparison with rollover
+ *
+ * PARAMETERS :
+ *   @v1      : first value to be compared
+ *   @v2      : second value to be compared
+ *
+ * RETURN     : int8_t type of comparison result
+ *              >0  -- v1 larger than v2
+ *              =0  -- v1 equal to v2
+ *              <0  -- v1 smaller than v2
+ *==========================================================================*/
+int8_t mm_channel_util_seq_comp_w_rollover(uint32_t v1,
+                                           uint32_t v2)
+{
+    int8_t ret = 0;
+
+    /* TODO: need to handle the case if v2 roll over to 0 */
+    if (v1 > v2) {
+        ret = 1;
+    } else if (v1 < v2) {
+        ret = -1;
+    }
+
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_handle_metadata
+ *
+ * DESCRIPTION: Handle frame matching logic change due to metadata
+ *
+ * PARAMETERS :
+ *   @ch_obj  : channel object
+ *   @queue   : superbuf queue
+ *   @buf_info: new buffer from stream
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_handle_metadata(
+                        mm_channel_t* ch_obj,
+                        mm_channel_queue_t * queue,
+                        mm_camera_buf_info_t *buf_info)
+{
+
+    int rc = 0;
+    mm_stream_t* stream_obj = NULL;
+    stream_obj = mm_channel_util_get_stream_by_handler(ch_obj,
+                buf_info->stream_id);
+    uint8_t is_prep_snapshot_done_valid = 0;
+    uint8_t is_good_frame_idx_range_valid = 0;
+    int32_t prep_snapshot_done_state = 0;
+    cam_frame_idx_range_t good_frame_idx_range;
+    uint8_t is_crop_1x_found = 0;
+    uint32_t snapshot_stream_id = 0;
+    uint32_t i;
+    /* Set expected frame id to a future frame idx, large enough to wait
+    * for good_frame_idx_range, and small enough to still capture an image */
+    const uint32_t max_future_frame_offset = 100U;
+
+    memset(&good_frame_idx_range, 0, sizeof(good_frame_idx_range));
+
+    if (NULL == stream_obj) {
+        CDBG_ERROR("%s: Invalid Stream Object for stream_id = %d",
+                   __func__, buf_info->stream_id);
+        rc = -1;
+        goto end;
+    }
+    if (NULL == stream_obj->stream_info) {
+        CDBG_ERROR("%s: NULL stream info for stream_id = %d",
+                    __func__, buf_info->stream_id);
+        rc = -1;
+        goto end;
+    }
+
+    if ((CAM_STREAM_TYPE_METADATA == stream_obj->stream_info->stream_type) &&
+            (stream_obj->ch_obj == ch_obj)) {
+        const metadata_buffer_t *metadata;
+        metadata = (const metadata_buffer_t *)buf_info->buf->buffer;
+
+        if (NULL == metadata) {
+            CDBG_ERROR("%s: NULL metadata buffer for metadata stream",
+                       __func__);
+            rc = -1;
+            goto end;
+        }
+        CDBG("%s: E , expected frame id: %d", __func__, queue->expected_frame_id);
+
+        IF_META_AVAILABLE(const int32_t, p_prep_snapshot_done_state,
+                CAM_INTF_META_PREP_SNAPSHOT_DONE, metadata) {
+            prep_snapshot_done_state = *p_prep_snapshot_done_state;
+            is_prep_snapshot_done_valid = 1;
+            CDBG("%s: prepare snapshot done valid ", __func__);
+        }
+        IF_META_AVAILABLE(const cam_frame_idx_range_t, p_good_frame_idx_range,
+                CAM_INTF_META_GOOD_FRAME_IDX_RANGE, metadata) {
+            good_frame_idx_range = *p_good_frame_idx_range;
+            is_good_frame_idx_range_valid = 1;
+            CDBG("%s: good_frame_idx_range : min: %d, max: %d , num frames = %d",
+                __func__, good_frame_idx_range.min_frame_idx,
+                good_frame_idx_range.max_frame_idx, good_frame_idx_range.num_led_on_frames);
+        }
+        IF_META_AVAILABLE(const cam_crop_data_t, p_crop_data,
+                CAM_INTF_META_CROP_DATA, metadata) {
+            cam_crop_data_t crop_data = *p_crop_data;
+
+            for (i = 0; i < ARRAY_SIZE(ch_obj->streams); i++) {
+                if (MM_STREAM_STATE_NOTUSED == ch_obj->streams[i].state) {
+                    continue;
+                }
+                if (CAM_STREAM_TYPE_SNAPSHOT ==
+                    ch_obj->streams[i].stream_info->stream_type) {
+                    snapshot_stream_id = ch_obj->streams[i].server_stream_id;
+                    break;
+                }
+            }
+
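+            /* a zero left/top crop offset on the snapshot stream indicates a 1x (no digital zoom) frame */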
+            for (i=0; i<crop_data.num_of_streams; i++) {
+                if (snapshot_stream_id == crop_data.crop_info[i].stream_id) {
+                    if (!crop_data.crop_info[i].crop.left &&
+                            !crop_data.crop_info[i].crop.top) {
+                        is_crop_1x_found = 1;
+                        break;
+                    }
+                }
+            }
+        }
+
+        IF_META_AVAILABLE(const cam_buf_divert_info_t, divert_info,
+                CAM_INTF_BUF_DIVERT_INFO, metadata) {
+            if (divert_info->frame_id >= buf_info->frame_idx) {
+                ch_obj->diverted_frame_id = divert_info->frame_id;
+            } else {
+                ch_obj->diverted_frame_id = 0;
+            }
+        }
+
+        if (ch_obj->isZoom1xFrameRequested) {
+            if (is_crop_1x_found) {
+                ch_obj->isZoom1xFrameRequested = 0;
+                queue->expected_frame_id = buf_info->frame_idx + 1;
+            } else {
+                queue->expected_frame_id += max_future_frame_offset;
+                /* Flush unwanted frames */
+                mm_channel_superbuf_flush_matched(ch_obj, queue);
+            }
+            goto end;
+        }
+
+        if (is_prep_snapshot_done_valid) {
+            ch_obj->bWaitForPrepSnapshotDone = 0;
+            if (prep_snapshot_done_state == NEED_FUTURE_FRAME) {
+                queue->expected_frame_id += max_future_frame_offset;
+                CDBG("%s: [ZSL Retro] NEED_FUTURE_FRAME, expected frame id = %d ",
+                        __func__,  queue->expected_frame_id);
+
+                mm_channel_superbuf_flush(ch_obj,
+                        queue, CAM_STREAM_TYPE_DEFAULT);
+
+                ch_obj->needLEDFlash = TRUE;
+            } else {
+                ch_obj->needLEDFlash = FALSE;
+            }
+        }
+        if (is_good_frame_idx_range_valid) {
+            if (good_frame_idx_range.min_frame_idx > queue->expected_frame_id) {
+                CDBG_HIGH("%s: [ZSL Retro] min_frame_idx %d is greater than expected_frame_id %d",
+                        __func__, good_frame_idx_range.min_frame_idx, queue->expected_frame_id);
+            }
+            queue->expected_frame_id =
+                good_frame_idx_range.min_frame_idx;
+             if((ch_obj->needLEDFlash == TRUE) && (ch_obj->burstSnapNum > 1)) {
+                queue->led_on_start_frame_id =
+                good_frame_idx_range.min_frame_idx;
+                queue->led_off_start_frame_id =
+                good_frame_idx_range.max_frame_idx;
+                queue->once = 0;
+                queue->led_on_num_frames =
+                  good_frame_idx_range.num_led_on_frames;
+                queue->frame_skip_count = good_frame_idx_range.frame_skip_count;
+                CDBG("%s: [ZSL Retro] Need Flash, expected frame id = %d,"
+                        " led_on start = %d, led off start = %d, led on frames = %d ",
+                        __func__,   queue->expected_frame_id, queue->led_on_start_frame_id,
+                        queue->led_off_start_frame_id, queue->led_on_num_frames);
+            } else {
+                CDBG("%s: [ZSL Retro]No flash, expected frame id = %d ",
+                        __func__, queue->expected_frame_id);
+            }
+        } else if ((MM_CHANNEL_BRACKETING_STATE_WAIT_GOOD_FRAME_IDX == ch_obj->bracketingState) &&
+                !is_prep_snapshot_done_valid) {
+            /* Flush unwanted frames */
+            mm_channel_superbuf_flush_matched(ch_obj, queue);
+            queue->expected_frame_id += max_future_frame_offset;
+        }
+        if (ch_obj->isFlashBracketingEnabled &&
+            is_good_frame_idx_range_valid) {
+            /* Flash bracketing needs two frames, with & without LED flash.
+            * In the valid range the min frame is with LED flash and the
+            * max frame is without LED flash */
+            queue->expected_frame_id =
+                good_frame_idx_range.min_frame_idx;
+            /* max frame is without led flash */
+            queue->expected_frame_id_without_led =
+                good_frame_idx_range.max_frame_idx;
+
+        } else if (is_good_frame_idx_range_valid) {
+            if (good_frame_idx_range.min_frame_idx >
+                queue->expected_frame_id) {
+                CDBG_HIGH("%s: min_frame_idx %d is greater than expected_frame_id %d",
+                        __func__, good_frame_idx_range.min_frame_idx,
+                        queue->expected_frame_id);
+            }
+            queue->expected_frame_id =
+                    good_frame_idx_range.min_frame_idx;
+
+            ch_obj->bracketingState = MM_CHANNEL_BRACKETING_STATE_ACTIVE;
+        }
+
+        if (ch_obj->frame_config != NULL && is_good_frame_idx_range_valid
+                && (good_frame_idx_range.config_batch_idx < ch_obj->frame_config->num_batch)) {
+            CDBG_HIGH("Frame Config: Expcted ID = %d batch index = %d",
+                    good_frame_idx_range.min_frame_idx, good_frame_idx_range.config_batch_idx);
+            queue->expected_frame_id =
+                    good_frame_idx_range.min_frame_idx;
+            if (ch_obj->frame_config->configs[ch_obj->cur_capture_idx].num_frames != 0) {
+                CDBG_ERROR("Drop in frame or early PIC Done evet frame-id = %d",
+                        buf_info->frame_idx);
+            }
+            ch_obj->cur_capture_idx = good_frame_idx_range.config_batch_idx;
+        }
+
+        if ((ch_obj->burstSnapNum > 1) && (ch_obj->needLEDFlash == TRUE)
+            && !ch_obj->isFlashBracketingEnabled
+            && (MM_CHANNEL_BRACKETING_STATE_OFF == ch_obj->bracketingState)
+            && ch_obj->frame_config == NULL) {
+            if((buf_info->frame_idx >= queue->led_off_start_frame_id)
+                    &&  !queue->once) {
+                CDBG("%s: [ZSL Retro]Burst snap num = %d ",
+                        __func__, ch_obj->burstSnapNum);
+                // Skip frames from LED OFF frame to get a good frame
+                queue->expected_frame_id = queue->led_off_start_frame_id +
+                        queue->frame_skip_count;
+                queue->once = 1;
+                ch_obj->stopZslSnapshot = 1;
+                ch_obj->needLEDFlash = FALSE;
+                CDBG("%s:[ZSL Retro]Reached max led on frames = %d , expected id = %d",
+                        __func__, buf_info->frame_idx, queue->expected_frame_id);
+            }
+        }
+    }
+end:
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_superbuf_comp_and_enqueue
+ *
+ * DESCRIPTION: implementation for matching logic for superbuf
+ *
+ * PARAMETERS :
+ *   @ch_obj  : channel object
+ *   @queue   : superbuf queue
+ *   @buf_info: new buffer from stream
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_superbuf_comp_and_enqueue(
+                        mm_channel_t* ch_obj,
+                        mm_channel_queue_t *queue,
+                        mm_camera_buf_info_t *buf_info)
+{
+    cam_node_t* node = NULL;
+    struct cam_list *head = NULL;
+    struct cam_list *pos = NULL;
+    mm_channel_queue_node_t* super_buf = NULL;
+    uint8_t buf_s_idx, i, found_super_buf, unmatched_bundles;
+    struct cam_list *last_buf, *insert_before_buf, *last_buf_ptr;
+
+    CDBG("%s: E", __func__);
+
+    for (buf_s_idx = 0; buf_s_idx < queue->num_streams; buf_s_idx++) {
+        if (buf_info->stream_id == queue->bundled_streams[buf_s_idx]) {
+            break;
+        }
+    }
+
+    if (buf_s_idx == queue->num_streams) {
+        CDBG_ERROR("%s: buf from stream (%d) not bundled", __func__, buf_info->stream_id);
+        return -1;
+    }
+
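+    /* a frame index of 0 also marks an empty slot inside a super buffer,
+     * so such a frame can never be matched; return it to the stream */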
+    if(buf_info->frame_idx == 0) {
+        mm_channel_qbuf(ch_obj, buf_info->buf);
+        return 0;
+    }
+
+    if (mm_channel_handle_metadata(ch_obj, queue, buf_info) < 0) {
+        mm_channel_qbuf(ch_obj, buf_info->buf);
+        return -1;
+    }
+
+    if (mm_channel_util_seq_comp_w_rollover(buf_info->frame_idx,
+                                            queue->expected_frame_id) < 0) {
+        /* incoming buf is older than expected buf id, will discard it */
+        mm_channel_qbuf(ch_obj, buf_info->buf);
+        return 0;
+    }
+
+    if((queue->nomatch_frame_id != 0)
+            && (queue->nomatch_frame_id > buf_info->frame_idx)
+            && (buf_info->buf->stream_type == CAM_STREAM_TYPE_METADATA)) {
+        /*Incoming metadata is older than expected*/
+        mm_channel_qbuf(ch_obj, buf_info->buf);
+        return 0;
+    }
+
+    /* comp */
+    pthread_mutex_lock(&queue->que.lock);
+    head = &queue->que.head.list;
+    /* get the last one in the queue which is possibly having no matching */
+    pos = head->next;
+
+    found_super_buf = 0;
+    unmatched_bundles = 0;
+    last_buf = NULL;
+    insert_before_buf = NULL;
+    last_buf_ptr = NULL;
+
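+    /* scan existing bundles for one this buffer can join; also remember the
+     * first unmatched bundle older than this frame (last_buf) and the first
+     * bundle newer than it (insert_before_buf) for later cleanup/insertion */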
+    while (pos != head) {
+        node = member_of(pos, cam_node_t, list);
+        super_buf = (mm_channel_queue_node_t*)node->data;
+
+        if (NULL != super_buf) {
+            if (super_buf->matched) {
+                /* find a matched super buf, move to next one */
+                pos = pos->next;
+                continue;
+            } else if ( buf_info->frame_idx == super_buf->frame_idx
+                    /*Pick metadata greater than available frameID*/
+                    || ((queue->nomatch_frame_id != 0)
+                    && (queue->nomatch_frame_id <= buf_info->frame_idx)
+                    && (super_buf->super_buf[buf_s_idx].frame_idx == 0)
+                    && (buf_info->buf->stream_type == CAM_STREAM_TYPE_METADATA))
+                    /*Pick available metadata closest to frameID*/
+                    || ((queue->attr.priority == MM_CAMERA_SUPER_BUF_PRIORITY_LOW)
+                    && (buf_info->buf->stream_type != CAM_STREAM_TYPE_METADATA)
+                    && (super_buf->super_buf[buf_s_idx].frame_idx == 0)
+                    && (super_buf->frame_idx > buf_info->frame_idx))){
+                /* Bundle when the super buffer frame IDs match, OR (in low
+                priority bundling) when the metadata frameID is greater than the
+                available super buffer frameID, OR when the metadata frame is
+                the closest one to the incoming frameID */
+                found_super_buf = 1;
+                queue->nomatch_frame_id = 0;
+                break;
+            } else {
+                unmatched_bundles++;
+                if ( NULL == last_buf ) {
+                    if ( super_buf->frame_idx < buf_info->frame_idx ) {
+                        last_buf = pos;
+                    }
+                }
+                if ( NULL == insert_before_buf ) {
+                    if ( super_buf->frame_idx > buf_info->frame_idx ) {
+                        insert_before_buf = pos;
+                    }
+                }
+                pos = pos->next;
+            }
+        }
+    }
+
+    if ( found_super_buf ) {
+
+        if(super_buf->super_buf[buf_s_idx].frame_idx != 0) {
+            //This can cause frame drop. We are overwriting same memory.
+            pthread_mutex_unlock(&queue->que.lock);
+            //CDBG_FATAL("FATAL: frame is already in camera ZSL queue");
+            CDBG_ERROR("***FATAL: frame is already in camera ZSL queue***");
+            mm_channel_qbuf(ch_obj, buf_info->buf);
+            return 0;
+        }
+
+        /*Insert incoming buffer to super buffer*/
+        super_buf->super_buf[buf_s_idx] = *buf_info;
+
+        /* check if superbuf is all matched */
+        super_buf->matched = 1;
+        for (i=0; i < super_buf->num_of_bufs; i++) {
+            if (super_buf->super_buf[i].frame_idx == 0) {
+                super_buf->matched = 0;
+                break;
+            }
+        }
+
+        if (super_buf->matched) {
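+            /* bundle complete: advance expected_frame_id, either to the no-LED
+             * frame (flash bracketing) or past the configured post_frame_skip */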
+            if(ch_obj->isFlashBracketingEnabled) {
+               queue->expected_frame_id =
+                   queue->expected_frame_id_without_led;
+               if (buf_info->frame_idx >=
+                       queue->expected_frame_id_without_led) {
+                   ch_obj->isFlashBracketingEnabled = FALSE;
+               }
+            } else {
+               queue->expected_frame_id = buf_info->frame_idx
+                                          + queue->attr.post_frame_skip;
+            }
+
+            super_buf->expected = FALSE;
+
+            CDBG("%s: curr = %d, skip = %d , Expected Frame ID: %d",
+                    __func__, buf_info->frame_idx,
+                    queue->attr.post_frame_skip, queue->expected_frame_id);
+
+            queue->match_cnt++;
+
+            /* Any older unmatched buffers need to be released */
+            if ( last_buf ) {
+                while ( last_buf != pos ) {
+                    node = member_of(last_buf, cam_node_t, list);
+                    super_buf = (mm_channel_queue_node_t*)node->data;
+                    if (NULL != super_buf) {
+                        for (i=0; i<super_buf->num_of_bufs; i++) {
+                            if (super_buf->super_buf[i].frame_idx != 0) {
+                                mm_channel_qbuf(ch_obj, super_buf->super_buf[i].buf);
+                            }
+                        }
+                        queue->que.size--;
+                        last_buf = last_buf->next;
+                        cam_list_del_node(&node->list);
+                        free(node);
+                        free(super_buf);
+                    } else {
+                        CDBG_ERROR(" %s : Invalid superbuf in queue!", __func__);
+                        break;
+                    }
+                }
+            }
+        } else {
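+            /* a bundle carrying the diverted frame is flagged as expected so
+             * the unmatched-frame pruning below never discards it */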
+            if (ch_obj->diverted_frame_id == buf_info->frame_idx) {
+                super_buf->expected = TRUE;
+                ch_obj->diverted_frame_id = 0;
+            }
+        }
+    } else {
+        if ((queue->attr.max_unmatched_frames < unmatched_bundles)
+                && ( NULL == last_buf )) {
+            /* incoming frame is older than the last bundled one */
+            mm_channel_qbuf(ch_obj, buf_info->buf);
+        } else {
+            last_buf_ptr = last_buf;
+
+            /* Loop to remove unmatched frames */
+            while ((queue->attr.max_unmatched_frames < unmatched_bundles)
+                    && (last_buf_ptr != NULL && last_buf_ptr != pos)) {
+                node = member_of(last_buf_ptr, cam_node_t, list);
+                super_buf = (mm_channel_queue_node_t*)node->data;
+                if (NULL != super_buf && super_buf->expected == FALSE
+                        && (&node->list != insert_before_buf)) {
+                    for (i=0; i<super_buf->num_of_bufs; i++) {
+                        if (super_buf->super_buf[i].frame_idx != 0) {
+                            mm_channel_qbuf(ch_obj, super_buf->super_buf[i].buf);
+                        }
+                    }
+                    queue->que.size--;
+                    cam_list_del_node(&node->list);
+                    free(node);
+                    free(super_buf);
+                    unmatched_bundles--;
+                }
+                last_buf_ptr = last_buf_ptr->next;
+            }
+
+            if (queue->attr.max_unmatched_frames < unmatched_bundles) {
+                node = member_of(last_buf, cam_node_t, list);
+                super_buf = (mm_channel_queue_node_t*)node->data;
+                for (i=0; i<super_buf->num_of_bufs; i++) {
+                    if (super_buf->super_buf[i].frame_idx != 0) {
+                        mm_channel_qbuf(ch_obj, super_buf->super_buf[i].buf);
+                    }
+                }
+                queue->que.size--;
+                cam_list_del_node(&node->list);
+                free(node);
+                free(super_buf);
+            }
+
+            /* insert the new frame at the appropriate position. */
+
+            mm_channel_queue_node_t *new_buf = NULL;
+            cam_node_t* new_node = NULL;
+
+            new_buf = (mm_channel_queue_node_t*)malloc(sizeof(mm_channel_queue_node_t));
+            new_node = (cam_node_t*)malloc(sizeof(cam_node_t));
+            if (NULL != new_buf && NULL != new_node) {
+                memset(new_buf, 0, sizeof(mm_channel_queue_node_t));
+                memset(new_node, 0, sizeof(cam_node_t));
+                new_node->data = (void *)new_buf;
+                new_buf->num_of_bufs = queue->num_streams;
+                new_buf->super_buf[buf_s_idx] = *buf_info;
+                new_buf->frame_idx = buf_info->frame_idx;
+
+                if (ch_obj->diverted_frame_id == buf_info->frame_idx) {
+                    new_buf->expected = TRUE;
+                    ch_obj->diverted_frame_id = 0;
+                }
+
+                /* enqueue */
+                if ( insert_before_buf ) {
+                    cam_list_insert_before_node(&new_node->list, insert_before_buf);
+                } else {
+                    cam_list_add_tail_node(&new_node->list, &queue->que.head.list);
+                }
+                queue->que.size++;
+
+                if(queue->num_streams == 1) {
+                    new_buf->matched = 1;
+                    new_buf->expected = FALSE;
+                    queue->expected_frame_id = buf_info->frame_idx + queue->attr.post_frame_skip;
+                    queue->match_cnt++;
+                }
+
+                if ((queue->attr.priority == MM_CAMERA_SUPER_BUF_PRIORITY_LOW)
+                        && (buf_info->buf->stream_type != CAM_STREAM_TYPE_METADATA)) {
+                    CDBG_ERROR ("%s : No metadata matching for frame = %d",
+                            __func__, buf_info->frame_idx);
+                    queue->nomatch_frame_id = buf_info->frame_idx;
+                }
+            } else {
+                /* No memory */
+                if (NULL != new_buf) {
+                    free(new_buf);
+                }
+                if (NULL != new_node) {
+                    free(new_node);
+                }
+                /* qbuf the new buf since we cannot enqueue */
+                mm_channel_qbuf(ch_obj, buf_info->buf);
+            }
+        }
+    }
+
+    pthread_mutex_unlock(&queue->que.lock);
+    CDBG("%s: X", __func__);
+    return 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_superbuf_dequeue_internal
+ *
+ * DESCRIPTION: internal implementation for dequeue from the superbuf queue
+ *
+ * PARAMETERS :
+ *   @queue   : superbuf queue
+ *   @matched_only : if dequeued buf should be matched
+ *
+ * RETURN     : ptr to a node from superbuf queue
+ *==========================================================================*/
+mm_channel_queue_node_t* mm_channel_superbuf_dequeue_internal(mm_channel_queue_t * queue,
+                                                              uint8_t matched_only)
+{
+    cam_node_t* node = NULL;
+    struct cam_list *head = NULL;
+    struct cam_list *pos = NULL;
+    mm_channel_queue_node_t* super_buf = NULL;
+
+    head = &queue->que.head.list;
+    pos = head->next;
+    if (pos != head) {
+        /* get the first node */
+        node = member_of(pos, cam_node_t, list);
+        super_buf = (mm_channel_queue_node_t*)node->data;
+        if ( (NULL != super_buf) &&
+             (matched_only == TRUE) &&
+             (super_buf->matched == FALSE) ) {
+            /* requested to dequeue matched frames only, but this superbuf is
+               not matched; simply set return ptr to NULL */
+            super_buf = NULL;
+        }
+        if (NULL != super_buf) {
+            /* remove from the queue */
+            cam_list_del_node(&node->list);
+            queue->que.size--;
+            if (super_buf->matched == TRUE) {
+                queue->match_cnt--;
+            }
+            free(node);
+        }
+    }
+
+    return super_buf;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_superbuf_dequeue
+ *
+ * DESCRIPTION: dequeue from the superbuf queue
+ *
+ * PARAMETERS :
+ *   @queue   : superbuf queue
+ *
+ * RETURN     : ptr to a node from superbuf queue
+ *==========================================================================*/
+mm_channel_queue_node_t* mm_channel_superbuf_dequeue(mm_channel_queue_t * queue)
+{
+    mm_channel_queue_node_t* super_buf = NULL;
+
+    pthread_mutex_lock(&queue->que.lock);
+    super_buf = mm_channel_superbuf_dequeue_internal(queue, TRUE);
+    pthread_mutex_unlock(&queue->que.lock);
+
+    return super_buf;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_superbuf_bufdone_overflow
+ *
+ * DESCRIPTION: keep superbuf queue no larger than watermark set by upper layer
+ *              via channel attribute
+ *
+ * PARAMETERS :
+ *   @my_obj  : channel object
+ *   @queue   : superbuf queue
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_superbuf_bufdone_overflow(mm_channel_t* my_obj,
+                                             mm_channel_queue_t * queue)
+{
+    int32_t rc = 0, i;
+    mm_channel_queue_node_t* super_buf = NULL;
+    if (MM_CAMERA_SUPER_BUF_NOTIFY_CONTINUOUS == queue->attr.notify_mode) {
+        /* for continuous streaming mode, no overflow handling is needed */
+        return 0;
+    }
+
+    CDBG("%s: before match_cnt=%d, water_mark=%d",
+         __func__, queue->match_cnt, queue->attr.water_mark);
+    /* bufdone overflowed bufs */
+    pthread_mutex_lock(&queue->que.lock);
+    while (queue->match_cnt > queue->attr.water_mark) {
+        super_buf = mm_channel_superbuf_dequeue_internal(queue, TRUE);
+        if (NULL != super_buf) {
+            for (i=0; i<super_buf->num_of_bufs; i++) {
+                if (NULL != super_buf->super_buf[i].buf) {
+                    mm_channel_qbuf(my_obj, super_buf->super_buf[i].buf);
+                }
+            }
+            free(super_buf);
+        }
+    }
+    pthread_mutex_unlock(&queue->que.lock);
+    CDBG("%s: after match_cnt=%d, water_mark=%d",
+         __func__, queue->match_cnt, queue->attr.water_mark);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_superbuf_skip
+ *
+ * DESCRIPTION: depending on the look-back configuration of the channel
+ *              attribute, unwanted superbufs will be removed from the
+ *              superbuf queue.
+ *
+ * PARAMETERS :
+ *   @my_obj  : channel object
+ *   @queue   : superbuf queue
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_superbuf_skip(mm_channel_t* my_obj,
+                                 mm_channel_queue_t * queue)
+{
+    int32_t rc = 0, i;
+    mm_channel_queue_node_t* super_buf = NULL;
+    if (MM_CAMERA_SUPER_BUF_NOTIFY_CONTINUOUS == queue->attr.notify_mode) {
+        /* for continuous streaming mode, no skip is needed */
+        return 0;
+    }
+
+    /* bufdone bufs beyond the look-back count */
+    pthread_mutex_lock(&queue->que.lock);
+    while (queue->match_cnt > queue->attr.look_back) {
+        super_buf = mm_channel_superbuf_dequeue_internal(queue, TRUE);
+        if (NULL != super_buf) {
+            for (i=0; i<super_buf->num_of_bufs; i++) {
+                if (NULL != super_buf->super_buf[i].buf) {
+                    mm_channel_qbuf(my_obj, super_buf->super_buf[i].buf);
+                }
+            }
+            free(super_buf);
+        }
+    }
+    pthread_mutex_unlock(&queue->que.lock);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_superbuf_flush
+ *
+ * DESCRIPTION: flush the superbuf queue.
+ *
+ * PARAMETERS :
+ *   @my_obj  : channel object
+ *   @queue   : superbuf queue
+ *   @cam_type: flush only particular type (default flushes all)
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_superbuf_flush(mm_channel_t* my_obj,
+        mm_channel_queue_t * queue, cam_stream_type_t cam_type)
+{
+    int32_t rc = 0, i;
+    mm_channel_queue_node_t* super_buf = NULL;
+    cam_stream_type_t stream_type = CAM_STREAM_TYPE_DEFAULT;
+
+    /* bufdone bufs */
+    pthread_mutex_lock(&queue->que.lock);
+    super_buf = mm_channel_superbuf_dequeue_internal(queue, FALSE);
+    while (super_buf != NULL) {
+        for (i=0; i<super_buf->num_of_bufs; i++) {
+            if (NULL != super_buf->super_buf[i].buf) {
+                stream_type = super_buf->super_buf[i].buf->stream_type;
+                if ((CAM_STREAM_TYPE_DEFAULT == cam_type) ||
+                        (cam_type == stream_type)) {
+                    mm_channel_qbuf(my_obj, super_buf->super_buf[i].buf);
+                }
+            }
+        }
+        free(super_buf);
+        super_buf = mm_channel_superbuf_dequeue_internal(queue, FALSE);
+    }
+    pthread_mutex_unlock(&queue->que.lock);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_proc_general_cmd
+ *
+ * DESCRIPTION: process general command
+ *
+ * PARAMETERS :
+ *   @my_obj  : channel object
+ *   @notify_mode : notification mode
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_proc_general_cmd(mm_channel_t *my_obj,
+                                      mm_camera_generic_cmd_t *p_gen_cmd)
+{
+    CDBG("%s: E",__func__);
+    int32_t rc = 0;
+    mm_camera_cmdcb_t* node = NULL;
+
+    node = (mm_camera_cmdcb_t *)malloc(sizeof(mm_camera_cmdcb_t));
+    if (NULL != node) {
+        memset(node, 0, sizeof(mm_camera_cmdcb_t));
+        node->u.gen_cmd = *p_gen_cmd;
+        node->cmd_type = MM_CAMERA_CMD_TYPE_GENERAL;
+
+        /* enqueue to cmd thread */
+        cam_queue_enq(&(my_obj->cmd_thread.cmd_queue), node);
+
+        /* wake up cmd thread */
+        cam_sem_post(&(my_obj->cmd_thread.cmd_sem));
+    } else {
+        CDBG_ERROR("%s: No memory for mm_camera_node_t", __func__);
+        rc = -1;
+    }
+    CDBG("%s: X",__func__);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_channel_superbuf_flush_matched
+ *
+ * DESCRIPTION: flush matched buffers from the superbuf queue.
+ *
+ * PARAMETERS :
+ *   @my_obj  : channel object
+ *   @queue   : superbuf queue
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_channel_superbuf_flush_matched(mm_channel_t* my_obj,
+                                  mm_channel_queue_t * queue)
+{
+    int32_t rc = 0, i;
+    mm_channel_queue_node_t* super_buf = NULL;
+
+    /* bufdone bufs */
+    pthread_mutex_lock(&queue->que.lock);
+    super_buf = mm_channel_superbuf_dequeue_internal(queue, TRUE);
+    while (super_buf != NULL) {
+        for (i=0; i<super_buf->num_of_bufs; i++) {
+            if (NULL != super_buf->super_buf[i].buf) {
+                mm_channel_qbuf(my_obj, super_buf->super_buf[i].buf);
+            }
+        }
+        free(super_buf);
+        super_buf = mm_channel_superbuf_dequeue_internal(queue, TRUE);
+    }
+    pthread_mutex_unlock(&queue->que.lock);
+
+    return rc;
+}
diff --git a/camera/QCamera2/stack/mm-camera-interface/src/mm_camera_interface.c b/camera/QCamera2/stack/mm-camera-interface/src/mm_camera_interface.c
new file mode 100644
index 0000000..1a243a9
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-interface/src/mm_camera_interface.c
@@ -0,0 +1,1831 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <pthread.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <linux/media.h>
+#include <signal.h>
+#include <media/msm_cam_sensor.h>
+#include <cutils/properties.h>
+#include <stdlib.h>
+
+#include "mm_camera_dbg.h"
+#include "mm_camera_interface.h"
+#include "mm_camera_sock.h"
+#include "mm_camera.h"
+
+static pthread_mutex_t g_intf_lock = PTHREAD_MUTEX_INITIALIZER;
+
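+/* global camera control block: holds the per-camera objects and video device
+ * names, indexed by the low byte of the camera handle */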
+static mm_camera_ctrl_t g_cam_ctrl = {0, {{0}}, {0}, {{0}}};
+
+static pthread_mutex_t g_handler_lock = PTHREAD_MUTEX_INITIALIZER;
+static uint16_t g_handler_history_count = 0; /* history count for handler */
+volatile uint32_t gMmCameraIntfLogLevel = 1;
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_util_generate_handler
+ *
+ * DESCRIPTION: utility function to generate handler for camera/channel/stream
+ *
+ * PARAMETERS :
+ *   @index: index of the object to have handler
+ *
+ * RETURN     : uint32_t type of handle that uniquely identify the object
+ *==========================================================================*/
+uint32_t mm_camera_util_generate_handler(uint8_t index)
+{
+    uint32_t handler = 0;
+    pthread_mutex_lock(&g_handler_lock);
+    g_handler_history_count++;
+    if (0 == g_handler_history_count) {
+        g_handler_history_count++;
+    }
+    handler = g_handler_history_count;
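+    /* pack the rolling 16-bit history count above the 8-bit object index,
+     * which stays in the low byte of the handle */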
+    handler = (handler<<8) | index;
+    pthread_mutex_unlock(&g_handler_lock);
+    return handler;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_util_get_index_by_handler
+ *
+ * DESCRIPTION: utility function to get index from handle
+ *
+ * PARAMETERS :
+ *   @handler: object handle
+ *
+ * RETURN     : uint8_t type of index derived from handle
+ *==========================================================================*/
+uint8_t mm_camera_util_get_index_by_handler(uint32_t handler)
+{
+    return (handler&0x000000ff);
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_util_get_dev_name
+ *
+ * DESCRIPTION: utility function to get device name from camera handle
+ *
+ * PARAMETERS :
+ *   @cam_handle: camera handle
+ *
+ * RETURN     : char ptr to the device name stored in global variable
+ * NOTE       : caller should not free the char ptr
+ *==========================================================================*/
+const char *mm_camera_util_get_dev_name(uint32_t cam_handle)
+{
+    char *dev_name = NULL;
+    uint8_t cam_idx = mm_camera_util_get_index_by_handler(cam_handle);
+    if(cam_idx < MM_CAMERA_MAX_NUM_SENSORS) {
+        dev_name = g_cam_ctrl.video_dev_name[cam_idx];
+    }
+    return dev_name;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_util_get_camera_by_handler
+ *
+ * DESCRIPTION: utility function to get camera object from camera handle
+ *
+ * PARAMETERS :
+ *   @cam_handle: camera handle
+ *
+ * RETURN     : ptr to the camera object stored in global variable
+ * NOTE       : caller should not free the camera object ptr
+ *==========================================================================*/
+mm_camera_obj_t* mm_camera_util_get_camera_by_handler(uint32_t cam_handle)
+{
+    mm_camera_obj_t *cam_obj = NULL;
+    uint8_t cam_idx = mm_camera_util_get_index_by_handler(cam_handle);
+
+    if (cam_idx < MM_CAMERA_MAX_NUM_SENSORS &&
+        (NULL != g_cam_ctrl.cam_obj[cam_idx]) &&
+        (cam_handle == g_cam_ctrl.cam_obj[cam_idx]->my_hdl)) {
+        cam_obj = g_cam_ctrl.cam_obj[cam_idx];
+    }
+    return cam_obj;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_query_capability
+ *
+ * DESCRIPTION: query camera capability
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_query_capability(uint32_t camera_handle)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    CDBG("%s E: camera_handler = %d ", __func__, camera_handle);
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
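+        /* take the per-camera lock before dropping the global interface lock
+         * so a concurrent close cannot free my_obj while this call runs */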
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_query_capability(my_obj);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    CDBG("%s :X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_set_parms
+ *
+ * DESCRIPTION: set parameters per camera
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @parms        : ptr to a param struct to be set to server
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : Assume the parms struct buf is already mapped to server via
+ *              domain socket. Corresponding fields of parameters to be set
+ *              are already filled in by upper layer caller.
+ *==========================================================================*/
+static int32_t mm_camera_intf_set_parms(uint32_t camera_handle,
+                                        parm_buffer_t *parms)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_set_parms(my_obj, parms);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_get_parms
+ *
+ * DESCRIPTION: get parameters per camera
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @parms        : ptr to a param struct to be get from server
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : Assume the parms struct buf is already mapped to server via
+ *              domain socket. Parameters to be retrieved from the server are
+ *              already specified by the upper layer caller. After this call,
+ *              corresponding fields of the requested parameters will be filled
+ *              in by the server with detailed information.
+ *==========================================================================*/
+static int32_t mm_camera_intf_get_parms(uint32_t camera_handle,
+                                        parm_buffer_t *parms)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_get_parms(my_obj, parms);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_do_auto_focus
+ *
+ * DESCRIPTION: performing auto focus
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : if this call succeeds, we always assume an auto_focus
+ *              event will follow.
+ *==========================================================================*/
+static int32_t mm_camera_intf_do_auto_focus(uint32_t camera_handle)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_do_auto_focus(my_obj);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_cancel_auto_focus
+ *
+ * DESCRIPTION: cancel auto focus
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_cancel_auto_focus(uint32_t camera_handle)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_cancel_auto_focus(my_obj);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_prepare_snapshot
+ *
+ * DESCRIPTION: prepare hardware for snapshot
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @do_af_flag   : flag indicating if AF is needed
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_prepare_snapshot(uint32_t camera_handle,
+                                               int32_t do_af_flag)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_prepare_snapshot(my_obj, do_af_flag);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_close
+ *
+ * DESCRIPTION: close a camera by its handle
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_close(uint32_t camera_handle)
+{
+    int32_t rc = -1;
+    uint8_t cam_idx = camera_handle & 0x00ff;
+    mm_camera_obj_t * my_obj = NULL;
+
+    CDBG("%s E: camera_handler = %d ", __func__, camera_handle);
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if (my_obj){
+        my_obj->ref_count--;
+
+        if(my_obj->ref_count > 0) {
+            /* still have reference to obj, return here */
+            CDBG("%s: ref_count=%d\n", __func__, my_obj->ref_count);
+            pthread_mutex_unlock(&g_intf_lock);
+            rc = 0;
+        } else {
+            /* need to close camera here as there is no other reference;
+             * first clear g_cam_ctrl's reference to cam_obj */
+            g_cam_ctrl.cam_obj[cam_idx] = NULL;
+
+            pthread_mutex_lock(&my_obj->cam_lock);
+            pthread_mutex_unlock(&g_intf_lock);
+
+            rc = mm_camera_close(my_obj);
+
+            pthread_mutex_destroy(&my_obj->cam_lock);
+            free(my_obj);
+        }
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_error_close
+ *
+ * DESCRIPTION: close the daemon after an unrecoverable error
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_error_close(uint32_t camera_handle)
+{
+    int32_t rc = -1;
+    uint8_t cam_idx = camera_handle & 0x00ff;
+    mm_camera_obj_t * my_obj = NULL;
+
+    CDBG("%s E: camera_handler = %d ", __func__, camera_handle);
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if (my_obj){
+        /*do not decrement the ref_count yet since that will happen during close*/
+        if((my_obj->ref_count - 1) > 0) {
+            /* still have reference to obj, return here */
+            CDBG("%s: ref_count=%d\n", __func__, my_obj->ref_count);
+            pthread_mutex_unlock(&g_intf_lock);
+            rc = 0;
+        } else {
+            /* need close camera here as no other reference*/
+            pthread_mutex_lock(&my_obj->cam_lock);
+            pthread_mutex_unlock(&g_intf_lock);
+
+            rc = mm_camera_close_fd(my_obj);
+        }
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_add_channel
+ *
+ * DESCRIPTION: add a channel
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @attr         : bundle attribute of the channel if needed
+ *   @channel_cb   : callback function for bundle data notify
+ *   @userdata     : user data ptr
+ *
+ * RETURN     : uint32_t type of channel handle
+ *              0  -- invalid channel handle, meaning the op failed
+ *              >0 -- successfully added a channel with a valid handle
+ * NOTE       : if no bundle data notify is needed, meaning each stream in the
+ *              channel will have its own stream data notify callback, then
+ *              attr, channel_cb, and userdata can be NULL. In this case,
+ *              no matching logic will be performed in channel for the bundling.
+ *==========================================================================*/
+static uint32_t mm_camera_intf_add_channel(uint32_t camera_handle,
+                                           mm_camera_channel_attr_t *attr,
+                                           mm_camera_buf_notify_t channel_cb,
+                                           void *userdata)
+{
+    uint32_t ch_id = 0;
+    mm_camera_obj_t * my_obj = NULL;
+
+    CDBG("%s :E camera_handler = %d", __func__, camera_handle);
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        ch_id = mm_camera_add_channel(my_obj, attr, channel_cb, userdata);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    CDBG("%s :X ch_id = %d", __func__, ch_id);
+    return ch_id;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_del_channel
+ *
+ * DESCRIPTION: delete a channel by its handle
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : all streams in the channel should be stopped already before
+ *              this channel can be deleted.
+ *==========================================================================*/
+static int32_t mm_camera_intf_del_channel(uint32_t camera_handle,
+                                          uint32_t ch_id)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    CDBG("%s :E ch_id = %d", __func__, ch_id);
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_del_channel(my_obj, ch_id);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    CDBG("%s :X", __func__);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_get_bundle_info
+ *
+ * DESCRIPTION: query bundle info of the channel
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *   @bundle_info  : bundle info to be filled in
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_get_bundle_info(uint32_t camera_handle,
+                                              uint32_t ch_id,
+                                              cam_bundle_config_t *bundle_info)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    CDBG("%s :E ch_id = %d", __func__, ch_id);
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_get_bundle_info(my_obj, ch_id, bundle_info);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    CDBG("%s :X", __func__);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_register_event_notify
+ *
+ * DESCRIPTION: register for event notify
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @evt_cb       : callback for event notify
+ *   @user_data    : user data ptr
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_register_event_notify(uint32_t camera_handle,
+                                                    mm_camera_event_notify_t evt_cb,
+                                                    void * user_data)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    CDBG("%s :E ", __func__);
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_register_event_notify(my_obj, evt_cb, user_data);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    CDBG("%s :E rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_qbuf
+ *
+ * DESCRIPTION: enqueue buffer back to kernel
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *   @buf          : buf ptr to be enqueued
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_qbuf(uint32_t camera_handle,
+                                    uint32_t ch_id,
+                                    mm_camera_buf_def_t *buf)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_qbuf(my_obj, ch_id, buf);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    CDBG("%s :X evt_type = %d",__func__,rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_get_queued_buf_count
+ *
+ * DESCRIPTION: returns the queued buffer count
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *   @stream_id : stream id
+ *
+ * RETURN     : int32_t - queued buffer count
+ *
+ *==========================================================================*/
+static int32_t mm_camera_intf_get_queued_buf_count(uint32_t camera_handle,
+        uint32_t ch_id, uint32_t stream_id)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_get_queued_buf_count(my_obj, ch_id, stream_id);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    CDBG("%s :X queued buffer count = %d",__func__,rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_link_stream
+ *
+ * DESCRIPTION: link a stream into a new channel
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *   @stream_id    : stream id
+ *   @linked_ch_id : channel in which the stream will be linked
+ *
+ * RETURN     : int32_t type of stream handle
+ *              0  -- invalid stream handle, meaning the op failed
+ *              >0 -- successfully linked a stream with a valid handle
+ *==========================================================================*/
+static int32_t mm_camera_intf_link_stream(uint32_t camera_handle,
+        uint32_t ch_id,
+        uint32_t stream_id,
+        uint32_t linked_ch_id)
+{
+    uint32_t id = 0;
+    mm_camera_obj_t * my_obj = NULL;
+
+    CDBG("%s : E handle = %u ch_id = %u",
+         __func__, camera_handle, ch_id);
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        id = mm_camera_link_stream(my_obj, ch_id, stream_id, linked_ch_id);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+
+    CDBG("%s :X stream_id = %u", __func__, stream_id);
+    return (int32_t)id;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_add_stream
+ *
+ * DESCRIPTION: add a stream into a channel
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *
+ * RETURN     : uint32_t type of stream handle
+ *              0  -- invalid stream handle, meaning the op failed
+ *              >0 -- successfully added a stream with a valid handle
+ *==========================================================================*/
+static uint32_t mm_camera_intf_add_stream(uint32_t camera_handle,
+                                          uint32_t ch_id)
+{
+    uint32_t stream_id = 0;
+    mm_camera_obj_t * my_obj = NULL;
+
+    CDBG("%s : E handle = %d ch_id = %d",
+         __func__, camera_handle, ch_id);
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        stream_id = mm_camera_add_stream(my_obj, ch_id);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    CDBG("%s :X stream_id = %d", __func__, stream_id);
+    return stream_id;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_del_stream
+ *
+ * DESCRIPTION: delete a stream by its handle
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *   @stream_id    : stream handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : stream should be stopped already before it can be deleted.
+ *==========================================================================*/
+static int32_t mm_camera_intf_del_stream(uint32_t camera_handle,
+                                         uint32_t ch_id,
+                                         uint32_t stream_id)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    CDBG("%s : E handle = %d ch_id = %d stream_id = %d",
+         __func__, camera_handle, ch_id, stream_id);
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_del_stream(my_obj, ch_id, stream_id);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    CDBG("%s :X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_config_stream
+ *
+ * DESCRIPTION: configure a stream
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *   @stream_id    : stream handle
+ *   @config       : stream configuration
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_config_stream(uint32_t camera_handle,
+                                            uint32_t ch_id,
+                                            uint32_t stream_id,
+                                            mm_camera_stream_config_t *config)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    CDBG("%s :E handle = %d, ch_id = %d,stream_id = %d",
+         __func__, camera_handle, ch_id, stream_id);
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    CDBG("%s :mm_camera_intf_config_stream stream_id = %d",__func__,stream_id);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_config_stream(my_obj, ch_id, stream_id, config);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    CDBG("%s :X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_start_channel
+ *
+ * DESCRIPTION: start a channel, which will start all streams in the channel
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_start_channel(uint32_t camera_handle,
+                                            uint32_t ch_id)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_start_channel(my_obj, ch_id);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    CDBG("%s :X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_stop_channel
+ *
+ * DESCRIPTION: stop a channel, which will stop all streams in the channel
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_stop_channel(uint32_t camera_handle,
+                                           uint32_t ch_id)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_stop_channel(my_obj, ch_id);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    CDBG("%s :X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_request_super_buf
+ *
+ * DESCRIPTION: for burst mode in bundle, request a certain number of matched
+ *              frames from the superbuf queue
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *   @num_buf_requested : number of matched frames needed
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_request_super_buf(uint32_t camera_handle,
+                                                uint32_t ch_id,
+                                                uint32_t num_buf_requested,
+                                                uint32_t num_retro_buf_requested)
+{
+    int32_t rc = -1;
+    CDBG("%s :E camera_handler = %d,ch_id = %d",
+         __func__, camera_handle, ch_id);
+    mm_camera_obj_t * my_obj = NULL;
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_request_super_buf (my_obj, ch_id,
+          num_buf_requested, num_retro_buf_requested);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    CDBG("%s :X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_cancel_super_buf_request
+ *
+ * DESCRIPTION: for burst mode in bundle, cancel the request for a certain
+ *              number of matched frames from the superbuf queue
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_cancel_super_buf_request(uint32_t camera_handle,
+                                                       uint32_t ch_id)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    CDBG("%s :E camera_handler = %d,ch_id = %d",
+         __func__, camera_handle, ch_id);
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_cancel_super_buf_request(my_obj, ch_id);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    CDBG("%s :X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_flush_super_buf_queue
+ *
+ * DESCRIPTION: flush out all frames in the superbuf queue
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *   @frame_idx    : frame index
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_flush_super_buf_queue(uint32_t camera_handle,
+                                                    uint32_t ch_id, uint32_t frame_idx)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    CDBG("%s :E camera_handler = %d,ch_id = %d",
+         __func__, camera_handle, ch_id);
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_flush_super_buf_queue(my_obj, ch_id, frame_idx);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    CDBG("%s :X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_start_zsl_snapshot
+ *
+ * DESCRIPTION: Starts zsl snapshot
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_start_zsl_snapshot(uint32_t camera_handle,
+        uint32_t ch_id)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    CDBG("%s :E camera_handler = %d,ch_id = %d",
+         __func__, camera_handle, ch_id);
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_start_zsl_snapshot_ch(my_obj, ch_id);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    CDBG("%s :X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_stop_zsl_snapshot
+ *
+ * DESCRIPTION: Stops zsl snapshot
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_stop_zsl_snapshot(uint32_t camera_handle,
+        uint32_t ch_id)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    CDBG("%s :E camera_handler = %d,ch_id = %d",
+         __func__, camera_handle, ch_id);
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_stop_zsl_snapshot_ch(my_obj, ch_id);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    CDBG("%s :X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_configure_notify_mode
+ *
+ * DESCRIPTION: Configures channel notification mode
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *   @notify_mode  : notification mode
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_configure_notify_mode(uint32_t camera_handle,
+                                                    uint32_t ch_id,
+                                                    mm_camera_super_buf_notify_mode_t notify_mode)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    CDBG("%s :E camera_handler = %d,ch_id = %d",
+         __func__, camera_handle, ch_id);
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_config_channel_notify(my_obj, ch_id, notify_mode);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    CDBG("%s :X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_map_buf
+ *
+ * DESCRIPTION: mapping camera buffer via domain socket to server
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @buf_type     : type of buffer to be mapped. could be following values:
+ *                   CAM_MAPPING_BUF_TYPE_CAPABILITY
+ *                   CAM_MAPPING_BUF_TYPE_SETPARM_BUF
+ *                   CAM_MAPPING_BUF_TYPE_GETPARM_BUF
+ *   @fd           : file descriptor of the buffer
+ *   @size         : size of the buffer
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_map_buf(uint32_t camera_handle,
+                                      uint8_t buf_type,
+                                      int fd,
+                                      size_t size)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_map_buf(my_obj, buf_type, fd, size);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_unmap_buf
+ *
+ * DESCRIPTION: unmapping camera buffer via domain socket to server
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @buf_type     : type of buffer to be unmapped. could be following values:
+ *                   CAM_MAPPING_BUF_TYPE_CAPABILITY
+ *                   CAM_MAPPING_BUF_TYPE_SETPARM_BUF
+ *                   CAM_MAPPING_BUF_TYPE_GETPARM_BUF
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_unmap_buf(uint32_t camera_handle,
+                                        uint8_t buf_type)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_unmap_buf(my_obj, buf_type);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_set_stream_parms
+ *
+ * DESCRIPTION: set parameters per stream
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *   @s_id         : stream handle
+ *   @parms        : ptr to a param struct to be set to server
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : Assume the parms struct buf is already mapped to server via
+ *              domain socket. Corresponding fields of parameters to be set
+ *              are already filled in by upper layer caller.
+ *==========================================================================*/
+static int32_t mm_camera_intf_set_stream_parms(uint32_t camera_handle,
+                                               uint32_t ch_id,
+                                               uint32_t s_id,
+                                               cam_stream_parm_buffer_t *parms)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    CDBG("%s :E camera_handle = %d,ch_id = %d,s_id = %d",
+         __func__, camera_handle, ch_id, s_id);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_set_stream_parms(my_obj, ch_id, s_id, parms);
+    }else{
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    CDBG("%s :X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_get_stream_parms
+ *
+ * DESCRIPTION: get parameters per stream
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *   @s_id         : stream handle
+ *   @parms        : ptr to a param struct to be get from server
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : Assume the parms struct buf is already mapped to server via
+ *              domain socket. Parameters to be get from server are already
+ *              filled in by upper layer caller. After this call, corresponding
+ *              fields of requested parameters will be filled in by server with
+ *              detailed information.
+ *==========================================================================*/
+static int32_t mm_camera_intf_get_stream_parms(uint32_t camera_handle,
+                                               uint32_t ch_id,
+                                               uint32_t s_id,
+                                               cam_stream_parm_buffer_t *parms)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    CDBG("%s :E camera_handle = %d,ch_id = %d,s_id = %d",
+         __func__, camera_handle, ch_id, s_id);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_get_stream_parms(my_obj, ch_id, s_id, parms);
+    }else{
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+
+    CDBG("%s :X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_map_stream_buf
+ *
+ * DESCRIPTION: mapping stream buffer via domain socket to server
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *   @s_id         : stream handle
+ *   @buf_type     : type of buffer to be mapped. could be following values:
+ *                   CAM_MAPPING_BUF_TYPE_STREAM_BUF
+ *                   CAM_MAPPING_BUF_TYPE_STREAM_INFO
+ *                   CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF
+ *   @buf_idx      : index of buffer within the stream buffers, only valid if
+ *                   buf_type is CAM_MAPPING_BUF_TYPE_STREAM_BUF or
+ *                   CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF
+ *   @plane_idx    : plane index. If all planes share the same fd,
+ *                   plane_idx = -1; otherwise, plane_idx is the
+ *                   index of the plane (0..num_of_planes)
+ *   @fd           : file descriptor of the buffer
+ *   @size         : size of the buffer
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_map_stream_buf(uint32_t camera_handle,
+                                             uint32_t ch_id,
+                                             uint32_t stream_id,
+                                             uint8_t buf_type,
+                                             uint32_t buf_idx,
+                                             int32_t plane_idx,
+                                             int fd,
+                                             size_t size)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    CDBG("%s :E camera_handle = %d, ch_id = %d, s_id = %d, buf_idx = %d, plane_idx = %d",
+         __func__, camera_handle, ch_id, stream_id, buf_idx, plane_idx);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_map_stream_buf(my_obj, ch_id, stream_id,
+                                      buf_type, buf_idx, plane_idx,
+                                      fd, size);
+    }else{
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+
+    CDBG("%s :X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_unmap_stream_buf
+ *
+ * DESCRIPTION: unmapping stream buffer via domain socket to server
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *   @s_id         : stream handle
+ *   @buf_type     : type of buffer to be unmapped. could be following values:
+ *                   CAM_MAPPING_BUF_TYPE_STREAM_BUF
+ *                   CAM_MAPPING_BUF_TYPE_STREAM_INFO
+ *                   CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF
+ *   @buf_idx      : index of buffer within the stream buffers, only valid if
+ *                   buf_type is CAM_MAPPING_BUF_TYPE_STREAM_BUF or
+ *                   CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF
+ *   @plane_idx    : plane index. If all planes share the same fd,
+ *                   plane_idx = -1; otherwise, plane_idx is the
+ *                   index of the plane (0..num_of_planes)
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_unmap_stream_buf(uint32_t camera_handle,
+                                               uint32_t ch_id,
+                                               uint32_t stream_id,
+                                               uint8_t buf_type,
+                                               uint32_t buf_idx,
+                                               int32_t plane_idx)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    CDBG("%s :E camera_handle = %d, ch_id = %d, s_id = %d, buf_idx = %d, plane_idx = %d",
+         __func__, camera_handle, ch_id, stream_id, buf_idx, plane_idx);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_unmap_stream_buf(my_obj, ch_id, stream_id,
+                                        buf_type, buf_idx, plane_idx);
+    }else{
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+
+    CDBG("%s :X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : get_sensor_info
+ *
+ * DESCRIPTION: get sensor info such as facing (back/front) and mount angle
+ *
+ * PARAMETERS :
+ *
+ * RETURN     :
+ *==========================================================================*/
+void get_sensor_info()
+{
+    int rc = 0;
+    int dev_fd = -1;
+    struct media_device_info mdev_info;
+    int num_media_devices = 0;
+    size_t num_cameras = 0;
+
+    CDBG("%s : E", __func__);
+    while (1) {
+        char dev_name[32];
+        snprintf(dev_name, sizeof(dev_name), "/dev/media%d", num_media_devices);
+        dev_fd = open(dev_name, O_RDWR | O_NONBLOCK);
+        if (dev_fd < 0) {
+            CDBG("Done discovering media devices\n");
+            break;
+        }
+        num_media_devices++;
+        memset(&mdev_info, 0, sizeof(mdev_info));
+        rc = ioctl(dev_fd, MEDIA_IOC_DEVICE_INFO, &mdev_info);
+        if (rc < 0) {
+            CDBG_ERROR("Error: ioctl media_dev failed: %s\n", strerror(errno));
+            close(dev_fd);
+            dev_fd = -1;
+            num_cameras = 0;
+            break;
+        }
+
+        if(strncmp(mdev_info.model,  MSM_CONFIGURATION_NAME, sizeof(mdev_info.model)) != 0) {
+            close(dev_fd);
+            dev_fd = -1;
+            continue;
+        }
+
+        unsigned int num_entities = 1;
+        while (1) {
+            struct media_entity_desc entity;
+            uint32_t temp;
+            uint32_t mount_angle;
+            uint32_t facing;
+
+            memset(&entity, 0, sizeof(entity));
+            entity.id = num_entities++;
+            rc = ioctl(dev_fd, MEDIA_IOC_ENUM_ENTITIES, &entity);
+            if (rc < 0) {
+                CDBG("Done enumerating media entities\n");
+                rc = 0;
+                break;
+            }
+            if(entity.type == MEDIA_ENT_T_V4L2_SUBDEV &&
+                entity.group_id == MSM_CAMERA_SUBDEV_SENSOR) {
+                temp = entity.flags >> 8;
+                mount_angle = (temp & 0xFF) * 90;
+                facing = (temp >> 8);
+                ALOGD("index = %u flag = %x mount_angle = %u facing = %u\n",
+                    (unsigned int)num_cameras, (unsigned int)temp,
+                    (unsigned int)mount_angle, (unsigned int)facing);
+                g_cam_ctrl.info[num_cameras].facing = (int)facing;
+                g_cam_ctrl.info[num_cameras].orientation = (int)mount_angle;
+                num_cameras++;
+                continue;
+            }
+        }
+
+        CDBG("%s: dev_info[id=%zu,name='%s']\n",
+            __func__, num_cameras, g_cam_ctrl.video_dev_name[num_cameras]);
+
+        close(dev_fd);
+        dev_fd = -1;
+    }
+
+    CDBG("%s: num_cameras=%d\n", __func__, g_cam_ctrl.num_cam);
+    return;
+}
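+
+/*
+ * Worked example of the entity.flags decoding in get_sensor_info() (the bit
+ * layout is inferred from the code above, not from kernel documentation):
+ * with entity.flags = 0x00010100,
+ *
+ *     temp        = flags >> 8         = 0x0101
+ *     mount_angle = (temp & 0xFF) * 90 = 1 * 90 = 90 degrees
+ *     facing      = temp >> 8          = 1 (CAMERA_FACING_FRONT)
+ */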
+
+/*===========================================================================
+ * FUNCTION   : sort_camera_info
+ *
+ * DESCRIPTION: sort camera info so that back cameras get smaller indices than front cameras
+ *
+ * PARAMETERS : number of cameras
+ *
+ * RETURN     :
+ *==========================================================================*/
+void sort_camera_info(int num_cam)
+{
+    int idx = 0, i;
+    struct camera_info temp_info[MM_CAMERA_MAX_NUM_SENSORS];
+    char temp_dev_name[MM_CAMERA_MAX_NUM_SENSORS][MM_CAMERA_DEV_NAME_LEN];
+    memset(temp_info, 0, sizeof(temp_info));
+    memset(temp_dev_name, 0, sizeof(temp_dev_name));
+
+    /* firstly save the back cameras info*/
+    for (i = 0; i < num_cam; i++) {
+        if (g_cam_ctrl.info[i].facing == CAMERA_FACING_BACK) {
+            temp_info[idx] = g_cam_ctrl.info[i];
+            memcpy(temp_dev_name[idx++],g_cam_ctrl.video_dev_name[i],
+                MM_CAMERA_DEV_NAME_LEN);
+        }
+    }
+
+    /* then save the front cameras info*/
+    for (i = 0; i < num_cam; i++) {
+        if (g_cam_ctrl.info[i].facing == CAMERA_FACING_FRONT) {
+            temp_info[idx] = g_cam_ctrl.info[i];
+            memcpy(temp_dev_name[idx++],g_cam_ctrl.video_dev_name[i],
+                MM_CAMERA_DEV_NAME_LEN);
+        }
+    }
+
+    if (idx == num_cam) {
+        memcpy(g_cam_ctrl.info, temp_info, sizeof(temp_info));
+        memcpy(g_cam_ctrl.video_dev_name, temp_dev_name, sizeof(temp_dev_name));
+    } else {
+        ALOGE("%s: Failed to sort all cameras!", __func__);
+        ALOGE("%s: Number of cameras %d sorted %d", __func__, num_cam, idx);
+    }
+    return;
+}
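+
+/*
+ * Illustration of the reordering done by sort_camera_info(): if probing
+ * happened to report cameras as {FRONT, BACK}, the arrays are rewritten so
+ * that index 0 holds the BACK camera and index 1 the FRONT camera, keeping
+ * the convention that camera ID 0 is the rear sensor. The matching entries
+ * of g_cam_ctrl.info and g_cam_ctrl.video_dev_name move together.
+ */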
+
+/*===========================================================================
+ * FUNCTION   : get_num_of_cameras
+ *
+ * DESCRIPTION: get number of cameras
+ *
+ * PARAMETERS :
+ *
+ * RETURN     : number of cameras supported
+ *==========================================================================*/
+uint8_t get_num_of_cameras()
+{
+    int rc = 0;
+    int dev_fd = -1;
+    struct media_device_info mdev_info;
+    int num_media_devices = 0;
+    int8_t num_cameras = 0;
+    char subdev_name[32];
+    int32_t sd_fd = -1;
+    struct sensor_init_cfg_data cfg;
+    char prop[PROPERTY_VALUE_MAX];
+    uint32_t globalLogLevel = 0;
+
+    property_get("persist.camera.hal.debug", prop, "0");
+    int val = atoi(prop);
+    if (0 <= val) {
+        gMmCameraIntfLogLevel = (uint32_t)val;
+    }
+    property_get("persist.camera.global.debug", prop, "0");
+    val = atoi(prop);
+    if (0 <= val) {
+        globalLogLevel = (uint32_t)val;
+    }
+
+    /* Highest log level among hal.logs and global.logs is selected */
+    if (gMmCameraIntfLogLevel < globalLogLevel)
+        gMmCameraIntfLogLevel = globalLogLevel;
+
+    CDBG("%s : E", __func__);
+
+    property_get("vold.decrypt", prop, "0");
+    int decrypt = atoi(prop);
+    if (decrypt == 1)
+     return 0;
+
+    /* lock the mutex */
+    pthread_mutex_lock(&g_intf_lock);
+
+    while (1) {
+        uint32_t num_entities = 1U;
+        char dev_name[32];
+
+        snprintf(dev_name, sizeof(dev_name), "/dev/media%d", num_media_devices);
+        dev_fd = open(dev_name, O_RDWR | O_NONBLOCK);
+        if (dev_fd < 0) {
+            CDBG("Done discovering media devices\n");
+            break;
+        }
+        num_media_devices++;
+        rc = ioctl(dev_fd, MEDIA_IOC_DEVICE_INFO, &mdev_info);
+        if (rc < 0) {
+            CDBG_ERROR("Error: ioctl media_dev failed: %s\n", strerror(errno));
+            close(dev_fd);
+            dev_fd = -1;
+            break;
+        }
+
+        if (strncmp(mdev_info.model, MSM_CONFIGURATION_NAME,
+          sizeof(mdev_info.model)) != 0) {
+            close(dev_fd);
+            dev_fd = -1;
+            continue;
+        }
+
+        while (1) {
+            struct media_entity_desc entity;
+            memset(&entity, 0, sizeof(entity));
+            entity.id = num_entities++;
+            CDBG("entity id %d", entity.id);
+            rc = ioctl(dev_fd, MEDIA_IOC_ENUM_ENTITIES, &entity);
+            if (rc < 0) {
+                CDBG("Done enumerating media entities");
+                rc = 0;
+                break;
+            }
+            CDBG("entity name %s type %d group id %d",
+                entity.name, entity.type, entity.group_id);
+            if (entity.type == MEDIA_ENT_T_V4L2_SUBDEV &&
+                entity.group_id == MSM_CAMERA_SUBDEV_SENSOR_INIT) {
+                snprintf(subdev_name, sizeof(subdev_name), "/dev/%s", entity.name);
+                break;
+            }
+        }
+        close(dev_fd);
+        dev_fd = -1;
+    }
+
+    /* Open sensor_init subdev */
+    sd_fd = open(subdev_name, O_RDWR);
+    if (sd_fd < 0) {
+        CDBG_ERROR("Open sensor_init subdev failed");
+        return FALSE;
+    }
+
+    cfg.cfgtype = CFG_SINIT_PROBE_WAIT_DONE;
+    cfg.cfg.setting = NULL;
+    if (ioctl(sd_fd, VIDIOC_MSM_SENSOR_INIT_CFG, &cfg) < 0) {
+        CDBG_ERROR("failed");
+    }
+    close(sd_fd);
+    dev_fd = -1;
+
+
+    num_media_devices = 0;
+    while (1) {
+        uint32_t num_entities = 1U;
+        char dev_name[32];
+
+        snprintf(dev_name, sizeof(dev_name), "/dev/media%d", num_media_devices);
+        dev_fd = open(dev_name, O_RDWR | O_NONBLOCK);
+        if (dev_fd < 0) {
+            CDBG("Done discovering media devices: %s\n", strerror(errno));
+            break;
+        }
+        num_media_devices++;
+        memset(&mdev_info, 0, sizeof(mdev_info));
+        rc = ioctl(dev_fd, MEDIA_IOC_DEVICE_INFO, &mdev_info);
+        if (rc < 0) {
+            CDBG_ERROR("Error: ioctl media_dev failed: %s\n", strerror(errno));
+            close(dev_fd);
+            dev_fd = -1;
+            num_cameras = 0;
+            break;
+        }
+
+        if(strncmp(mdev_info.model, MSM_CAMERA_NAME, sizeof(mdev_info.model)) != 0) {
+            close(dev_fd);
+            dev_fd = -1;
+            continue;
+        }
+
+        while (1) {
+            struct media_entity_desc entity;
+            memset(&entity, 0, sizeof(entity));
+            entity.id = num_entities++;
+            rc = ioctl(dev_fd, MEDIA_IOC_ENUM_ENTITIES, &entity);
+            if (rc < 0) {
+                CDBG("Done enumerating media entities\n");
+                rc = 0;
+                break;
+            }
+            if(entity.type == MEDIA_ENT_T_DEVNODE_V4L && entity.group_id == QCAMERA_VNODE_GROUP_ID) {
+                strlcpy(g_cam_ctrl.video_dev_name[num_cameras],
+                     entity.name,
+                     sizeof(g_cam_ctrl.video_dev_name[num_cameras]));
+                break;
+            }
+        }
+
+        CDBG("%s: dev_info[id=%d,name='%s']\n",
+            __func__, (int)num_cameras, g_cam_ctrl.video_dev_name[num_cameras]);
+
+        num_cameras++;
+        close(dev_fd);
+        dev_fd = -1;
+    }
+    g_cam_ctrl.num_cam = num_cameras;
+
+    get_sensor_info();
+    sort_camera_info(g_cam_ctrl.num_cam);
+    /* unlock the mutex */
+    pthread_mutex_unlock(&g_intf_lock);
+    CDBG("%s: num_cameras=%d\n", __func__, (int)g_cam_ctrl.num_cam);
+    return(uint8_t)g_cam_ctrl.num_cam;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_intf_process_advanced_capture
+ *
+ * DESCRIPTION: Configures channel advanced capture mode
+ *
+ * PARAMETERS :
+ *   @camera_handle: camera handle
+ *   @ch_id        : channel handle
+ *   @type         : advanced capture type
+ *   @trigger      : 1 for start and 0 for cancel/stop
+ *   @in_value     : input capture configuration
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_intf_process_advanced_capture(uint32_t camera_handle,
+        uint32_t ch_id, mm_camera_advanced_capture_t type,
+        int8_t trigger, void *in_value)
+{
+    int32_t rc = -1;
+    mm_camera_obj_t * my_obj = NULL;
+
+    CDBG("%s: E camera_handler = %d,ch_id = %d",
+         __func__, camera_handle, ch_id);
+    pthread_mutex_lock(&g_intf_lock);
+    my_obj = mm_camera_util_get_camera_by_handler(camera_handle);
+
+    if(my_obj) {
+        pthread_mutex_lock(&my_obj->cam_lock);
+        pthread_mutex_unlock(&g_intf_lock);
+        rc = mm_camera_channel_advanced_capture(my_obj, ch_id, type,
+                (uint32_t)trigger, in_value);
+    } else {
+        pthread_mutex_unlock(&g_intf_lock);
+    }
+    CDBG("%s: X ", __func__);
+    return rc;
+}
+
+struct camera_info *get_cam_info(uint32_t camera_id)
+{
+    return &g_cam_ctrl.info[camera_id];
+}
+
+/* camera ops v-table */
+static mm_camera_ops_t mm_camera_ops = {
+    .query_capability = mm_camera_intf_query_capability,
+    .register_event_notify = mm_camera_intf_register_event_notify,
+    .close_camera = mm_camera_intf_close,
+    .error_close_camera = mm_camera_intf_error_close,
+    .set_parms = mm_camera_intf_set_parms,
+    .get_parms = mm_camera_intf_get_parms,
+    .do_auto_focus = mm_camera_intf_do_auto_focus,
+    .cancel_auto_focus = mm_camera_intf_cancel_auto_focus,
+    .prepare_snapshot = mm_camera_intf_prepare_snapshot,
+    .start_zsl_snapshot = mm_camera_intf_start_zsl_snapshot,
+    .stop_zsl_snapshot = mm_camera_intf_stop_zsl_snapshot,
+    .map_buf = mm_camera_intf_map_buf,
+    .unmap_buf = mm_camera_intf_unmap_buf,
+    .add_channel = mm_camera_intf_add_channel,
+    .delete_channel = mm_camera_intf_del_channel,
+    .get_bundle_info = mm_camera_intf_get_bundle_info,
+    .add_stream = mm_camera_intf_add_stream,
+    .link_stream = mm_camera_intf_link_stream,
+    .delete_stream = mm_camera_intf_del_stream,
+    .config_stream = mm_camera_intf_config_stream,
+    .qbuf = mm_camera_intf_qbuf,
+    .get_queued_buf_count = mm_camera_intf_get_queued_buf_count,
+    .map_stream_buf = mm_camera_intf_map_stream_buf,
+    .unmap_stream_buf = mm_camera_intf_unmap_stream_buf,
+    .set_stream_parms = mm_camera_intf_set_stream_parms,
+    .get_stream_parms = mm_camera_intf_get_stream_parms,
+    .start_channel = mm_camera_intf_start_channel,
+    .stop_channel = mm_camera_intf_stop_channel,
+    .request_super_buf = mm_camera_intf_request_super_buf,
+    .cancel_super_buf_request = mm_camera_intf_cancel_super_buf_request,
+    .flush_super_buf_queue = mm_camera_intf_flush_super_buf_queue,
+    .configure_notify_mode = mm_camera_intf_configure_notify_mode,
+    .process_advanced_capture = mm_camera_intf_process_advanced_capture
+};
+
+/*===========================================================================
+ * FUNCTION   : camera_open
+ *
+ * DESCRIPTION: open a camera by camera index
+ *
+ * PARAMETERS :
+ *   @camera_idx  : camera index; must be within the range 0 to num_of_cameras - 1
+ *   @camera_vtbl : ptr to a virtual table containing camera handle and operation table.
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              non-zero error code -- failure
+ *==========================================================================*/
+int32_t camera_open(uint8_t camera_idx, mm_camera_vtbl_t **camera_vtbl)
+{
+    int32_t rc = 0;
+    mm_camera_obj_t *cam_obj = NULL;
+
+    CDBG("%s: E camera_idx = %d\n", __func__, camera_idx);
+    if (camera_idx >= g_cam_ctrl.num_cam) {
+        CDBG_ERROR("%s: Invalid camera_idx (%d)", __func__, camera_idx);
+        return -EINVAL;
+    }
+
+    pthread_mutex_lock(&g_intf_lock);
+    /* opened already */
+    if(NULL != g_cam_ctrl.cam_obj[camera_idx]) {
+        /* Add reference */
+        g_cam_ctrl.cam_obj[camera_idx]->ref_count++;
+        pthread_mutex_unlock(&g_intf_lock);
+        CDBG("%s:  opened alreadyn", __func__);
+        *camera_vtbl = &g_cam_ctrl.cam_obj[camera_idx]->vtbl;
+        return rc;
+    }
+
+    cam_obj = (mm_camera_obj_t *)malloc(sizeof(mm_camera_obj_t));
+    if(NULL == cam_obj) {
+        pthread_mutex_unlock(&g_intf_lock);
+        CDBG_ERROR("%s:  no mem", __func__);
+        return -EINVAL;
+    }
+
+    /* initialize camera obj */
+    memset(cam_obj, 0, sizeof(mm_camera_obj_t));
+    cam_obj->ctrl_fd = -1;
+    cam_obj->ds_fd = -1;
+    cam_obj->ref_count++;
+    cam_obj->my_hdl = mm_camera_util_generate_handler(camera_idx);
+    cam_obj->vtbl.camera_handle = cam_obj->my_hdl; /* set handler */
+    cam_obj->vtbl.ops = &mm_camera_ops;
+    pthread_mutex_init(&cam_obj->cam_lock, NULL);
+    /* unlock the global interface lock; otherwise, in the dual camera use
+     * case, the current open would block operations on another opened
+     * camera obj */
+    pthread_mutex_lock(&cam_obj->cam_lock);
+    pthread_mutex_unlock(&g_intf_lock);
+
+    rc = mm_camera_open(cam_obj);
+
+    pthread_mutex_lock(&g_intf_lock);
+    if (rc != 0) {
+        CDBG_ERROR("%s: mm_camera_open err = %d", __func__, rc);
+        pthread_mutex_destroy(&cam_obj->cam_lock);
+        g_cam_ctrl.cam_obj[camera_idx] = NULL;
+        free(cam_obj);
+        cam_obj = NULL;
+        pthread_mutex_unlock(&g_intf_lock);
+        *camera_vtbl = NULL;
+        return rc;
+    } else {
+        CDBG("%s: Open succeded\n", __func__);
+        g_cam_ctrl.cam_obj[camera_idx] = cam_obj;
+        pthread_mutex_unlock(&g_intf_lock);
+        *camera_vtbl = &cam_obj->vtbl;
+        return 0;
+    }
+}
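+
+/*
+ * Minimal usage sketch of this interface from a caller's point of view
+ * (illustrative only; error handling and the buffer mapping a real HAL
+ * client performs are omitted):
+ *
+ *     uint8_t n = get_num_of_cameras();
+ *     mm_camera_vtbl_t *vtbl = NULL;
+ *     if (n > 0 && camera_open(0, &vtbl) == 0 && vtbl != NULL) {
+ *         vtbl->ops->query_capability(vtbl->camera_handle);
+ *         vtbl->ops->close_camera(vtbl->camera_handle);
+ *     }
+ */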
diff --git a/camera/QCamera2/stack/mm-camera-interface/src/mm_camera_sock.c b/camera/QCamera2/stack/mm-camera-interface/src/mm_camera_sock.c
new file mode 100755
index 0000000..4676791
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-interface/src/mm_camera_sock.c
@@ -0,0 +1,231 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+
+#include "mm_camera_dbg.h"
+#include "mm_camera_sock.h"
+#include "cam_types.h"
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_socket_create
+ *
+ * DESCRIPTION: opens a domain socket tied to camera ID and socket type
+ *  @cam_id   : camera ID
+ *  @sock_type: socket type; MM_CAMERA_SOCK_TYPE_TCP maps to SOCK_STREAM,
+ *              MM_CAMERA_SOCK_TYPE_UDP to SOCK_DGRAM
+ *
+ * RETURN     : fd related to the domain socket
+ *==========================================================================*/
+int mm_camera_socket_create(int cam_id, mm_camera_sock_type_t sock_type)
+{
+    int socket_fd;
+    mm_camera_sock_addr_t sock_addr;
+    int sktype;
+    int rc;
+
+    switch (sock_type)
+    {
+      case MM_CAMERA_SOCK_TYPE_UDP:
+        sktype = SOCK_DGRAM;
+        break;
+      case MM_CAMERA_SOCK_TYPE_TCP:
+        sktype = SOCK_STREAM;
+        break;
+      default:
+        CDBG_ERROR("%s: unknown socket type =%d", __func__, sock_type);
+        return -1;
+    }
+    socket_fd = socket(AF_UNIX, sktype, 0);
+    if (socket_fd < 0) {
+        CDBG_ERROR("%s: error create socket fd =%d", __func__, socket_fd);
+        return socket_fd;
+    }
+
+    memset(&sock_addr, 0, sizeof(sock_addr));
+    sock_addr.addr_un.sun_family = AF_UNIX;
+    snprintf(sock_addr.addr_un.sun_path,
+             UNIX_PATH_MAX, QCAMERA_DUMP_FRM_LOCATION"cam_socket%d", cam_id);
+    rc = connect(socket_fd, &sock_addr.addr, sizeof(sock_addr.addr_un));
+    if (0 != rc) {
+      close(socket_fd);
+      socket_fd = -1;
+      CDBG_ERROR("%s: socket_fd=%d %s ", __func__, socket_fd, strerror(errno));
+    }
+
+    CDBG("%s: socket_fd=%d %s", __func__, socket_fd,
+        sock_addr.addr_un.sun_path);
+    return socket_fd;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_socket_close
+ *
+ * DESCRIPTION:  close domain socket by its fd
+ *   @fd      : file descriptor for the domain socket to be closed
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void mm_camera_socket_close(int fd)
+{
+    if (fd >= 0) {
+      close(fd);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_socket_sendmsg
+ *
+ * DESCRIPTION:  send msg through domain socket
+ *   @fd      : socket fd
+ *   @msg     : pointer to msg to be sent over domain socket
+ *   @buf_size: size of the msg to be sent
+ *   @sendfd  : file descriptor to be sent, or a negative value to send none
+ *
+ * RETURN     : the total bytes of sent msg
+ *==========================================================================*/
+int mm_camera_socket_sendmsg(
+  int fd,
+  void *msg,
+  size_t buf_size,
+  int sendfd)
+{
+    struct msghdr msgh;
+    struct iovec iov[1];
+    struct cmsghdr * cmsghp = NULL;
+    char control[CMSG_SPACE(sizeof(int))];
+
+    if (msg == NULL) {
+      CDBG("%s: msg is NULL", __func__);
+      return -1;
+    }
+    memset(&msgh, 0, sizeof(msgh));
+    msgh.msg_name = NULL;
+    msgh.msg_namelen = 0;
+
+    iov[0].iov_base = msg;
+    iov[0].iov_len = buf_size;
+    msgh.msg_iov = iov;
+    msgh.msg_iovlen = 1;
+    CDBG("%s: iov_len=%llu", __func__,
+            (unsigned long long int)iov[0].iov_len);
+
+    msgh.msg_control = NULL;
+    msgh.msg_controllen = 0;
+
+    /* if sendfd is valid, we need to pass it through control msg */
+    if( sendfd >= 0) {
+      msgh.msg_control = control;
+      msgh.msg_controllen = sizeof(control);
+      cmsghp = CMSG_FIRSTHDR(&msgh);
+      if (cmsghp != NULL) {
+        CDBG("%s: Got ctrl msg pointer", __func__);
+        cmsghp->cmsg_level = SOL_SOCKET;
+        cmsghp->cmsg_type = SCM_RIGHTS;
+        cmsghp->cmsg_len = CMSG_LEN(sizeof(int));
+        *((int *)CMSG_DATA(cmsghp)) = sendfd;
+        CDBG("%s: cmsg data=%d", __func__, *((int *) CMSG_DATA(cmsghp)));
+      } else {
+        CDBG("%s: ctrl msg NULL", __func__);
+        return -1;
+      }
+    }
+
+    return sendmsg(fd, &(msgh), 0);
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_socket_recvmsg
+ *
+ * DESCRIPTION:  receive msg from domain socket.
+ *   @fd      : socket fd
+ *   @msg     : pointer to mm_camera_sock_msg_packet_t to hold incoming msg,
+ *              need be allocated by the caller
+ *   @buf_size: the size of the buf that holds incoming msg
+ *   @rcvdfd  : pointer to hold recvd file descriptor if not NULL.
+ *
+ * RETURN     : the total bytes of received msg
+ *==========================================================================*/
+int mm_camera_socket_recvmsg(
+  int fd,
+  void *msg,
+  uint32_t buf_size,
+  int *rcvdfd)
+{
+    struct msghdr msgh;
+    struct iovec iov[1];
+    struct cmsghdr *cmsghp = NULL;
+    char control[CMSG_SPACE(sizeof(int))];
+    int rcvd_fd = -1;
+    int rcvd_len = 0;
+
+    if ( (msg == NULL) || (buf_size <= 0) ) {
+      CDBG_ERROR(" %s: msg buf is NULL", __func__);
+      return -1;
+    }
+
+    memset(&msgh, 0, sizeof(msgh));
+    msgh.msg_name = NULL;
+    msgh.msg_namelen = 0;
+    msgh.msg_control = control;
+    msgh.msg_controllen = sizeof(control);
+
+    iov[0].iov_base = msg;
+    iov[0].iov_len = buf_size;
+    msgh.msg_iov = iov;
+    msgh.msg_iovlen = 1;
+
+    if ( (rcvd_len = recvmsg(fd, &(msgh), 0)) <= 0) {
+      CDBG_ERROR(" %s: recvmsg failed", __func__);
+      return rcvd_len;
+    }
+
+    CDBG("%s:  msg_ctrl %p len %zd", __func__, msgh.msg_control,
+        msgh.msg_controllen);
+
+    if( ((cmsghp = CMSG_FIRSTHDR(&msgh)) != NULL) &&
+        (cmsghp->cmsg_len == CMSG_LEN(sizeof(int))) ) {
+      if (cmsghp->cmsg_level == SOL_SOCKET &&
+        cmsghp->cmsg_type == SCM_RIGHTS) {
+        CDBG("%s:  CtrlMsg is valid", __func__);
+        rcvd_fd = *((int *) CMSG_DATA(cmsghp));
+        CDBG("%s:  Receieved fd=%d", __func__, rcvd_fd);
+      } else {
+        CDBG_ERROR("%s:  Unexpected Control Msg. Line=%d", __func__, __LINE__);
+      }
+    }
+
+    if (rcvdfd) {
+      *rcvdfd = rcvd_fd;
+    }
+
+    return rcvd_len;
+}
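+
+/*
+ * Sketch of how the helpers above pair up across the domain socket
+ * (illustrative only; the daemon side is outside this file, and "msg" stands
+ * in for whatever packet struct the caller uses):
+ *
+ *     // sender: ship a packet plus an ion buffer fd via SCM_RIGHTS
+ *     int sock = mm_camera_socket_create(cam_id, MM_CAMERA_SOCK_TYPE_UDP);
+ *     mm_camera_socket_sendmsg(sock, &msg, sizeof(msg), buf_fd);
+ *
+ *     // receiver: the fd arrives as a fresh descriptor in this process
+ *     int rcvd_fd = -1;
+ *     mm_camera_socket_recvmsg(sock, &msg, sizeof(msg), &rcvd_fd);
+ *
+ * Passing a negative sendfd skips the control message, so the same call is
+ * used for plain commands that do not map a buffer.
+ */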
diff --git a/camera/QCamera2/stack/mm-camera-interface/src/mm_camera_stream.c b/camera/QCamera2/stack/mm-camera-interface/src/mm_camera_stream.c
new file mode 100755
index 0000000..8511bba
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-interface/src/mm_camera_stream.c
@@ -0,0 +1,3577 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <stdlib.h>
+#include <pthread.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <time.h>
+#include <cam_semaphore.h>
+#ifdef VENUS_PRESENT
+#include <media/msm_media_info.h>
+#endif
+
+#include "mm_camera_dbg.h"
+#include "mm_camera_interface.h"
+#include "mm_camera.h"
+
+/* internal function declarations */
+int32_t mm_stream_qbuf(mm_stream_t *my_obj,
+                       mm_camera_buf_def_t *buf);
+int32_t mm_stream_set_ext_mode(mm_stream_t * my_obj);
+int32_t mm_stream_set_fmt(mm_stream_t * my_obj);
+int32_t mm_stream_sync_info(mm_stream_t *my_obj);
+int32_t mm_stream_init_bufs(mm_stream_t * my_obj);
+int32_t mm_stream_deinit_bufs(mm_stream_t * my_obj);
+int32_t mm_stream_request_buf(mm_stream_t * my_obj);
+int32_t mm_stream_unreg_buf(mm_stream_t * my_obj);
+int32_t mm_stream_release(mm_stream_t *my_obj);
+int32_t mm_stream_set_parm(mm_stream_t *my_obj,
+                           cam_stream_parm_buffer_t *value);
+int32_t mm_stream_get_parm(mm_stream_t *my_obj,
+                           cam_stream_parm_buffer_t *value);
+int32_t mm_stream_do_action(mm_stream_t *my_obj,
+                            void *in_value);
+int32_t mm_stream_streamon(mm_stream_t *my_obj);
+int32_t mm_stream_streamoff(mm_stream_t *my_obj);
+int32_t mm_stream_read_msm_frame(mm_stream_t * my_obj,
+                                 mm_camera_buf_info_t* buf_info,
+                                 uint8_t num_planes);
+int32_t mm_stream_read_user_buf(mm_stream_t * my_obj,
+        mm_camera_buf_info_t* buf_info);
+int32_t mm_stream_write_user_buf(mm_stream_t * my_obj,
+        mm_camera_buf_def_t *buf);
+
+int32_t mm_stream_config(mm_stream_t *my_obj,
+                         mm_camera_stream_config_t *config);
+int32_t mm_stream_reg_buf(mm_stream_t * my_obj);
+int32_t mm_stream_buf_done(mm_stream_t * my_obj,
+                           mm_camera_buf_def_t *frame);
+int32_t mm_stream_get_queued_buf_count(mm_stream_t * my_obj);
+
+int32_t mm_stream_calc_offset(mm_stream_t *my_obj);
+int32_t mm_stream_calc_offset_preview(cam_format_t fmt,
+                                      cam_dimension_t *dim,
+                                      cam_stream_buf_plane_info_t *buf_planes);
+int32_t mm_stream_calc_offset_post_view(cam_format_t fmt,
+                                      cam_dimension_t *dim,
+                                      cam_stream_buf_plane_info_t *buf_planes);
+
+int32_t mm_stream_calc_offset_snapshot(cam_format_t fmt,
+                                       cam_dimension_t *dim,
+                                       cam_padding_info_t *padding,
+                                       cam_stream_buf_plane_info_t *buf_planes);
+int32_t mm_stream_calc_offset_raw(cam_format_t fmt,
+                                  cam_dimension_t *dim,
+                                  cam_padding_info_t *padding,
+                                  cam_stream_buf_plane_info_t *buf_planes);
+int32_t mm_stream_calc_offset_video(cam_dimension_t *dim,
+                                    cam_stream_buf_plane_info_t *buf_planes);
+int32_t mm_stream_calc_offset_metadata(cam_dimension_t *dim,
+                                       cam_padding_info_t *padding,
+                                       cam_stream_buf_plane_info_t *buf_planes);
+int32_t mm_stream_calc_offset_postproc(cam_stream_info_t *stream_info,
+                                       cam_padding_info_t *padding,
+                                       cam_stream_buf_plane_info_t *plns);
+
+
+/* state machine function declare */
+int32_t mm_stream_fsm_inited(mm_stream_t * my_obj,
+                             mm_stream_evt_type_t evt,
+                             void * in_val,
+                             void * out_val);
+int32_t mm_stream_fsm_acquired(mm_stream_t * my_obj,
+                               mm_stream_evt_type_t evt,
+                               void * in_val,
+                               void * out_val);
+int32_t mm_stream_fsm_cfg(mm_stream_t * my_obj,
+                          mm_stream_evt_type_t evt,
+                          void * in_val,
+                          void * out_val);
+int32_t mm_stream_fsm_buffed(mm_stream_t * my_obj,
+                             mm_stream_evt_type_t evt,
+                             void * in_val,
+                             void * out_val);
+int32_t mm_stream_fsm_reg(mm_stream_t * my_obj,
+                          mm_stream_evt_type_t evt,
+                          void * in_val,
+                          void * out_val);
+int32_t mm_stream_fsm_active(mm_stream_t * my_obj,
+                             mm_stream_evt_type_t evt,
+                             void * in_val,
+                             void * out_val);
+uint32_t mm_stream_get_v4l2_fmt(cam_format_t fmt);
+
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_notify_channel
+ *
+ * DESCRIPTION: function to notify channel object on received buffer
+ *
+ * PARAMETERS :
+ *   @ch_obj  : channel object
+ *   @buf_info: ptr to struct storing buffer information
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              <0 -- failure
+ *==========================================================================*/
+int32_t mm_stream_notify_channel(struct mm_channel* ch_obj,
+        mm_camera_buf_info_t *buf_info)
+{
+    int32_t rc = 0;
+    mm_camera_cmdcb_t* node = NULL;
+
+    if ((NULL == ch_obj) || (NULL == buf_info)) {
+        CDBG_ERROR("%s : Invalid channel/buffer", __func__);
+        return -ENODEV;
+    }
+
+    /* send cam_sem_post to wake up channel cmd thread to enqueue
+     * to super buffer */
+    node = (mm_camera_cmdcb_t *)malloc(sizeof(mm_camera_cmdcb_t));
+    if (NULL != node) {
+        memset(node, 0, sizeof(mm_camera_cmdcb_t));
+        node->cmd_type = MM_CAMERA_CMD_TYPE_DATA_CB;
+        node->u.buf = *buf_info;
+
+        /* enqueue to cmd thread */
+        cam_queue_enq(&(ch_obj->cmd_thread.cmd_queue), node);
+
+        /* wake up cmd thread */
+        cam_sem_post(&(ch_obj->cmd_thread.cmd_sem));
+    } else {
+        CDBG_ERROR("%s: No memory for mm_camera_node_t", __func__);
+        rc = -ENOMEM;
+    }
+
+    return rc;
+}
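+
+/* A minimal sketch of the consumer side of the node queued above. The channel
+ * command thread (implemented elsewhere, not in this file) is assumed to follow
+ * roughly this producer/consumer pattern; the cb/user_data field names are
+ * assumptions based on how cmd_queue and cmd_sem are used here:
+ *
+ *     for (;;) {
+ *         cam_sem_wait(&cmd_thread->cmd_sem);
+ *         node = (mm_camera_cmdcb_t *)cam_queue_deq(&cmd_thread->cmd_queue);
+ *         if (NULL != node) {
+ *             cmd_thread->cb(node, cmd_thread->user_data); // super-buf handling
+ *             free(node);
+ *         }
+ *     }
+ */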
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_handle_rcvd_buf
+ *
+ * DESCRIPTION: function to handle newly received stream buffer
+ *
+ * PARAMETERS :
+ *   @my_obj  : stream object
+ *   @buf_info: ptr to struct storing buffer information
+ *   @has_cb  : flag indicating whether a data CB is registered for this stream
+ *
+ * RETURN     : none
+ *==========================================================================*/
+void mm_stream_handle_rcvd_buf(mm_stream_t *my_obj,
+                               mm_camera_buf_info_t *buf_info,
+                               uint8_t has_cb)
+{
+    int32_t rc = 0;
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+
+    /* enqueue to super buf thread */
+    if (my_obj->is_bundled) {
+        rc = mm_stream_notify_channel(my_obj->ch_obj, buf_info);
+        if (rc < 0) {
+            CDBG_ERROR("%s: Unable to notify channel", __func__);
+        }
+    }
+
+    pthread_mutex_lock(&my_obj->buf_lock);
+    if(my_obj->is_linked) {
+        /* need to add into super buf for linking, add ref count */
+        my_obj->buf_status[buf_info->buf->buf_idx].buf_refcnt++;
+
+        rc = mm_stream_notify_channel(my_obj->linked_obj, buf_info);
+        if (rc < 0) {
+            CDBG_ERROR("%s: Unable to notify channel", __func__);
+        }
+    }
+    pthread_mutex_unlock(&my_obj->buf_lock);
+
+    if(has_cb) {
+        mm_camera_cmdcb_t* node = NULL;
+
+        /* send cam_sem_post to wake up cmd thread to dispatch dataCB */
+        node = (mm_camera_cmdcb_t *)malloc(sizeof(mm_camera_cmdcb_t));
+        if (NULL != node) {
+            memset(node, 0, sizeof(mm_camera_cmdcb_t));
+            node->cmd_type = MM_CAMERA_CMD_TYPE_DATA_CB;
+            node->u.buf = *buf_info;
+
+            /* enqueue to cmd thread */
+            cam_queue_enq(&(my_obj->cmd_thread.cmd_queue), node);
+
+            /* wake up cmd thread */
+            cam_sem_post(&(my_obj->cmd_thread.cmd_sem));
+        } else {
+            CDBG_ERROR("%s: No memory for mm_camera_node_t", __func__);
+        }
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_data_notify
+ *
+ * DESCRIPTION: callback to handle data notify from kernel
+ *
+ * PARAMETERS :
+ *   @user_data : user data ptr (stream object)
+ *
+ * RETURN     : none
+ *==========================================================================*/
+static void mm_stream_data_notify(void* user_data)
+{
+    mm_stream_t *my_obj = (mm_stream_t*)user_data;
+    int32_t i, rc;
+    uint8_t has_cb = 0, length = 0;
+    mm_camera_buf_info_t buf_info;
+
+    if (NULL == my_obj) {
+        return;
+    }
+
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+    if (MM_STREAM_STATE_ACTIVE != my_obj->state) {
+        /* this CB should only be received in the ACTIVE (stream-on) state;
+         * if not, return here */
+        CDBG_ERROR("%s: ERROR!! Wrong state (%d) to receive data notify!",
+                   __func__, my_obj->state);
+        return;
+    }
+
+    if (my_obj->stream_info->streaming_mode == CAM_STREAMING_MODE_BATCH) {
+        length = 1;
+    } else {
+        length = my_obj->frame_offset.num_planes;
+    }
+
+    memset(&buf_info, 0, sizeof(mm_camera_buf_info_t));
+    rc = mm_stream_read_msm_frame(my_obj, &buf_info,
+        (uint8_t)length);
+    if (rc != 0) {
+        return;
+    }
+    uint32_t idx = buf_info.buf->buf_idx;
+
+    pthread_mutex_lock(&my_obj->cb_lock);
+    for (i = 0; i < MM_CAMERA_STREAM_BUF_CB_MAX; i++) {
+        if(NULL != my_obj->buf_cb[i].cb) {
+            /* for every CB, add ref count */
+            has_cb = 1;
+            break;
+        }
+    }
+    pthread_mutex_unlock(&my_obj->cb_lock);
+
+    pthread_mutex_lock(&my_obj->buf_lock);
+    /* update buffer location */
+    my_obj->buf_status[idx].in_kernel = 0;
+
+    /* update buf ref count */
+    if (my_obj->is_bundled) {
+        /* need to add into super buf since bundled, add ref count */
+        my_obj->buf_status[idx].buf_refcnt++;
+    }
+    my_obj->buf_status[idx].buf_refcnt =
+        (uint8_t)(my_obj->buf_status[idx].buf_refcnt + has_cb);
+    pthread_mutex_unlock(&my_obj->buf_lock);
+
+    mm_stream_handle_rcvd_buf(my_obj, &buf_info, has_cb);
+}
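+
+/* Putting the pieces above together, the per-frame data path in this file is
+ * roughly:
+ *
+ *     poll fd becomes readable
+ *       -> mm_stream_data_notify()             (poll thread context)
+ *            -> mm_stream_read_msm_frame()     (VIDIOC_DQBUF)
+ *            -> mm_stream_handle_rcvd_buf()
+ *                 -> mm_stream_notify_channel()   (bundled / linked super-buf path)
+ *                 -> stream cmd thread            (only if a data CB is registered)
+ *                      -> mm_stream_dispatch_app_data()
+ *                           -> registered buf_cb[i].cb(...)
+ *                           -> mm_stream_buf_done()  (drops the has_cb reference)
+ */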
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_dispatch_app_data
+ *
+ * DESCRIPTION: dispatch stream buffer to registered users
+ *
+ * PARAMETERS :
+ *   @cmd_cb   : command node carrying the received buffer information
+ *   @user_data: user data ptr (stream object)
+ *
+ * RETURN     : none
+ *==========================================================================*/
+static void mm_stream_dispatch_app_data(mm_camera_cmdcb_t *cmd_cb,
+                                        void* user_data)
+{
+    int i;
+    mm_stream_t * my_obj = (mm_stream_t *)user_data;
+    mm_camera_buf_info_t* buf_info = NULL;
+    mm_camera_super_buf_t super_buf;
+    mm_camera_cmd_thread_name("mm_cam_stream");
+
+    if (NULL == my_obj) {
+        return;
+    }
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+
+    if (MM_CAMERA_CMD_TYPE_DATA_CB != cmd_cb->cmd_type) {
+        CDBG_ERROR("%s: Wrong cmd_type (%d) for dataCB",
+                   __func__, cmd_cb->cmd_type);
+        return;
+    }
+
+    buf_info = &cmd_cb->u.buf;
+    memset(&super_buf, 0, sizeof(mm_camera_super_buf_t));
+    super_buf.num_bufs = 1;
+    super_buf.bufs[0] = buf_info->buf;
+    super_buf.camera_handle = my_obj->ch_obj->cam_obj->my_hdl;
+    super_buf.ch_id = my_obj->ch_obj->my_hdl;
+
+    pthread_mutex_lock(&my_obj->cb_lock);
+    for(i = 0; i < MM_CAMERA_STREAM_BUF_CB_MAX; i++) {
+        if(NULL != my_obj->buf_cb[i].cb) {
+            if (my_obj->buf_cb[i].cb_count != 0) {
+                /* if <0, means infinite CB
+                 * if >0, means CB for a certain number of times
+                 * in either case we need to call the CB */
+
+                /* increase buf ref cnt */
+                pthread_mutex_lock(&my_obj->buf_lock);
+                my_obj->buf_status[buf_info->buf->buf_idx].buf_refcnt++;
+                pthread_mutex_unlock(&my_obj->buf_lock);
+
+                /* callback */
+                my_obj->buf_cb[i].cb(&super_buf,
+                                     my_obj->buf_cb[i].user_data);
+            }
+
+            /* if >0, reduce the count by 1 each time the CB is called;
+             * when the count reaches 0, reset buf_cb so no further CBs are issued */
+            if (my_obj->buf_cb[i].cb_count > 0) {
+                my_obj->buf_cb[i].cb_count--;
+                if (0 == my_obj->buf_cb[i].cb_count) {
+                    my_obj->buf_cb[i].cb = NULL;
+                    my_obj->buf_cb[i].user_data = NULL;
+                }
+            }
+        }
+    }
+    pthread_mutex_unlock(&my_obj->cb_lock);
+
+    /* do buf_done since we increased refcnt by one when has_cb */
+    mm_stream_buf_done(my_obj, buf_info->buf);
+}
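+
+/* The cb_count contract enforced above, illustrated with the same buf_cb fields
+ * that mm_stream_config() fills in further below (my_frame_cb and my_ctx are
+ * placeholder names, not part of this HAL):
+ *
+ *     my_obj->buf_cb[0].cb        = my_frame_cb;  // app data callback
+ *     my_obj->buf_cb[0].user_data = my_ctx;
+ *     my_obj->buf_cb[0].cb_count  = -1;           // <0: fire for every frame
+ *     // my_obj->buf_cb[0].cb_count = 3;          // >0: fire 3 times, then auto-cleared
+ */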
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_fsm_fn
+ *
+ * DESCRIPTION: stream finite state machine entry function. Depending on the
+ *              stream state, the incoming event is handled differently.
+ *
+ * PARAMETERS :
+ *   @my_obj   : ptr to a stream object
+ *   @evt      : stream event to be processed
+ *   @in_val   : input event payload. Can be NULL if not needed.
+ *   @out_val  : output payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_fsm_fn(mm_stream_t *my_obj,
+                         mm_stream_evt_type_t evt,
+                         void * in_val,
+                         void * out_val)
+{
+    int32_t rc = -1;
+
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+    switch (my_obj->state) {
+    case MM_STREAM_STATE_NOTUSED:
+        CDBG("%s: Not handling evt in unused state", __func__);
+        break;
+    case MM_STREAM_STATE_INITED:
+        rc = mm_stream_fsm_inited(my_obj, evt, in_val, out_val);
+        break;
+    case MM_STREAM_STATE_ACQUIRED:
+        rc = mm_stream_fsm_acquired(my_obj, evt, in_val, out_val);
+        break;
+    case MM_STREAM_STATE_CFG:
+        rc = mm_stream_fsm_cfg(my_obj, evt, in_val, out_val);
+        break;
+    case MM_STREAM_STATE_BUFFED:
+        rc = mm_stream_fsm_buffed(my_obj, evt, in_val, out_val);
+        break;
+    case MM_STREAM_STATE_REG:
+        rc = mm_stream_fsm_reg(my_obj, evt, in_val, out_val);
+        break;
+    case MM_STREAM_STATE_ACTIVE:
+        rc = mm_stream_fsm_active(my_obj, evt, in_val, out_val);
+        break;
+    default:
+        CDBG("%s: Not a valid state (%d)", __func__, my_obj->state);
+        break;
+    }
+    CDBG("%s : X rc =%d",__func__,rc);
+    return rc;
+}
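+
+/* A minimal sketch of how a caller walks a stream through the states handled
+ * below (error checking omitted; cfg stands for a filled-in
+ * mm_camera_stream_config_t):
+ *
+ *     mm_stream_fsm_fn(s, MM_STREAM_EVT_ACQUIRE, NULL, NULL); // INITED   -> ACQUIRED
+ *     mm_stream_fsm_fn(s, MM_STREAM_EVT_SET_FMT, &cfg, NULL); // ACQUIRED -> CFG
+ *     mm_stream_fsm_fn(s, MM_STREAM_EVT_GET_BUF, NULL, NULL); // CFG      -> BUFFED
+ *     mm_stream_fsm_fn(s, MM_STREAM_EVT_REG_BUF, NULL, NULL); // BUFFED   -> REG
+ *     mm_stream_fsm_fn(s, MM_STREAM_EVT_START,   NULL, NULL); // REG      -> ACTIVE
+ *     ...
+ *     mm_stream_fsm_fn(s, MM_STREAM_EVT_STOP,    NULL, NULL); // ACTIVE   -> REG
+ */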
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_fsm_inited
+ *
+ * DESCRIPTION: stream finite state machine function to handle event in INITED
+ *              state.
+ *
+ * PARAMETERS :
+ *   @my_obj   : ptr to a stream object
+ *   @evt      : stream event to be processed
+ *   @in_val   : input event payload. Can be NULL if not needed.
+ *   @out_val  : output payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_fsm_inited(mm_stream_t *my_obj,
+                             mm_stream_evt_type_t evt,
+                             void * in_val,
+                             void * out_val)
+{
+    int32_t rc = 0;
+    char dev_name[MM_CAMERA_DEV_NAME_LEN];
+    const char *dev_name_value = NULL;
+    if (NULL == my_obj) {
+      CDBG_ERROR("%s: NULL camera object\n", __func__);
+      return -1;
+    }
+
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+    switch(evt) {
+    case MM_STREAM_EVT_ACQUIRE:
+        if ((NULL == my_obj->ch_obj) || (NULL == my_obj->ch_obj->cam_obj)) {
+            CDBG_ERROR("%s: NULL channel or camera obj\n", __func__);
+            rc = -1;
+            break;
+        }
+
+        dev_name_value = mm_camera_util_get_dev_name(my_obj->ch_obj->cam_obj->my_hdl);
+        if (NULL == dev_name_value) {
+            CDBG_ERROR("%s: NULL device name\n", __func__);
+            rc = -1;
+            break;
+        }
+
+        snprintf(dev_name, sizeof(dev_name), "/dev/%s",
+                 dev_name_value);
+
+        my_obj->fd = open(dev_name, O_RDWR | O_NONBLOCK);
+        if (my_obj->fd < 0) {
+            CDBG_ERROR("%s: open dev returned %d\n", __func__, my_obj->fd);
+            rc = -1;
+            break;
+        }
+        CDBG("%s: open dev fd = %d\n", __func__, my_obj->fd);
+        rc = mm_stream_set_ext_mode(my_obj);
+        if (0 == rc) {
+            my_obj->state = MM_STREAM_STATE_ACQUIRED;
+        } else {
+            /* failed setting ext_mode
+             * close fd */
+            close(my_obj->fd);
+            my_obj->fd = -1;
+            break;
+        }
+        break;
+    default:
+        CDBG_ERROR("%s: invalid state (%d) for evt (%d), in(%p), out(%p)",
+                   __func__, my_obj->state, evt, in_val, out_val);
+        break;
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_fsm_acquired
+ *
+ * DESCRIPTION: stream finite state machine function to handle event in ACQUIRED
+ *              state.
+ *
+ * PARAMETERS :
+ *   @my_obj   : ptr to a stream object
+ *   @evt      : stream event to be processed
+ *   @in_val   : input event payload. Can be NULL if not needed.
+ *   @out_val  : output payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_fsm_acquired(mm_stream_t *my_obj,
+                               mm_stream_evt_type_t evt,
+                               void * in_val,
+                               void * out_val)
+{
+    int32_t rc = 0;
+
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+    switch(evt) {
+    case MM_STREAM_EVT_SET_FMT:
+        {
+            mm_camera_stream_config_t *config =
+                (mm_camera_stream_config_t *)in_val;
+
+            rc = mm_stream_config(my_obj, config);
+
+            /* change state to configured */
+            my_obj->state = MM_STREAM_STATE_CFG;
+
+            break;
+        }
+    case MM_STREAM_EVT_RELEASE:
+        rc = mm_stream_release(my_obj);
+        /* change state to not used */
+        my_obj->state = MM_STREAM_STATE_NOTUSED;
+        break;
+    case MM_STREAM_EVT_SET_PARM:
+        {
+            mm_evt_paylod_set_get_stream_parms_t *payload =
+                (mm_evt_paylod_set_get_stream_parms_t *)in_val;
+            rc = mm_stream_set_parm(my_obj, payload->parms);
+        }
+        break;
+    case MM_STREAM_EVT_GET_PARM:
+        {
+            mm_evt_paylod_set_get_stream_parms_t *payload =
+                (mm_evt_paylod_set_get_stream_parms_t *)in_val;
+            rc = mm_stream_get_parm(my_obj, payload->parms);
+        }
+        break;
+    default:
+        CDBG_ERROR("%s: invalid state (%d) for evt (%d), in(%p), out(%p)",
+                   __func__, my_obj->state, evt, in_val, out_val);
+    }
+    CDBG("%s :X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_fsm_cfg
+ *
+ * DESCRIPTION: stream finite state machine function to handle event in CONFIGURED
+ *              state.
+ *
+ * PARAMETERS :
+ *   @my_obj   : ptr to a stream object
+ *   @evt      : stream event to be processed
+ *   @in_val   : input event payload. Can be NULL if not needed.
+ *   @out_val  : output payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_fsm_cfg(mm_stream_t * my_obj,
+                          mm_stream_evt_type_t evt,
+                          void * in_val,
+                          void * out_val)
+{
+    int32_t rc = 0;
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+    switch(evt) {
+    case MM_STREAM_EVT_SET_FMT:
+        {
+            mm_camera_stream_config_t *config =
+                (mm_camera_stream_config_t *)in_val;
+
+            rc = mm_stream_config(my_obj, config);
+
+            /* change state to configured */
+            my_obj->state = MM_STREAM_STATE_CFG;
+
+            break;
+        }
+    case MM_STREAM_EVT_RELEASE:
+        rc = mm_stream_release(my_obj);
+        my_obj->state = MM_STREAM_STATE_NOTUSED;
+        break;
+    case MM_STREAM_EVT_SET_PARM:
+        {
+            mm_evt_paylod_set_get_stream_parms_t *payload =
+                (mm_evt_paylod_set_get_stream_parms_t *)in_val;
+            rc = mm_stream_set_parm(my_obj, payload->parms);
+        }
+        break;
+    case MM_STREAM_EVT_GET_PARM:
+        {
+            mm_evt_paylod_set_get_stream_parms_t *payload =
+                (mm_evt_paylod_set_get_stream_parms_t *)in_val;
+            rc = mm_stream_get_parm(my_obj, payload->parms);
+        }
+        break;
+    case MM_STREAM_EVT_GET_BUF:
+        rc = mm_stream_init_bufs(my_obj);
+        /* change state to buff allocated */
+        if(0 == rc) {
+            my_obj->state = MM_STREAM_STATE_BUFFED;
+        }
+        break;
+    default:
+        CDBG_ERROR("%s: invalid state (%d) for evt (%d), in(%p), out(%p)",
+                   __func__, my_obj->state, evt, in_val, out_val);
+    }
+    CDBG("%s :X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_fsm_buffed
+ *
+ * DESCRIPTION: stream finite state machine function to handle event in BUFFED
+ *              state.
+ *
+ * PARAMETERS :
+ *   @my_obj   : ptr to a stream object
+ *   @evt      : stream event to be processed
+ *   @in_val   : input event payload. Can be NULL if not needed.
+ *   @out_val  : output payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_fsm_buffed(mm_stream_t * my_obj,
+                             mm_stream_evt_type_t evt,
+                             void * in_val,
+                             void * out_val)
+{
+    int32_t rc = 0;
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+    switch(evt) {
+    case MM_STREAM_EVT_PUT_BUF:
+        rc = mm_stream_deinit_bufs(my_obj);
+        /* change state to configured */
+        my_obj->state = MM_STREAM_STATE_CFG;
+        break;
+    case MM_STREAM_EVT_REG_BUF:
+        rc = mm_stream_reg_buf(my_obj);
+        /* change state to regged */
+        if(0 == rc) {
+            my_obj->state = MM_STREAM_STATE_REG;
+        }
+        break;
+    case MM_STREAM_EVT_SET_PARM:
+        {
+            mm_evt_paylod_set_get_stream_parms_t *payload =
+                (mm_evt_paylod_set_get_stream_parms_t *)in_val;
+            rc = mm_stream_set_parm(my_obj, payload->parms);
+        }
+        break;
+    case MM_STREAM_EVT_GET_PARM:
+        {
+            mm_evt_paylod_set_get_stream_parms_t *payload =
+                (mm_evt_paylod_set_get_stream_parms_t *)in_val;
+            rc = mm_stream_get_parm(my_obj, payload->parms);
+        }
+        break;
+    default:
+        CDBG_ERROR("%s: invalid state (%d) for evt (%d), in(%p), out(%p)",
+                   __func__, my_obj->state, evt, in_val, out_val);
+    }
+    CDBG("%s :X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_fsm_reg
+ *
+ * DESCRIPTION: stream finite state machine function to handle event in REGGED
+ *              state.
+ *
+ * PARAMETERS :
+ *   @my_obj   : ptr to a stream object
+ *   @evt      : stream event to be processed
+ *   @in_val   : input event payload. Can be NULL if not needed.
+ *   @out_val  : output payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_fsm_reg(mm_stream_t * my_obj,
+                          mm_stream_evt_type_t evt,
+                          void * in_val,
+                          void * out_val)
+{
+    int32_t rc = 0;
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+
+    switch(evt) {
+    case MM_STREAM_EVT_UNREG_BUF:
+        rc = mm_stream_unreg_buf(my_obj);
+
+        /* change state to buffed */
+        my_obj->state = MM_STREAM_STATE_BUFFED;
+        break;
+    case MM_STREAM_EVT_START:
+        {
+            uint8_t has_cb = 0;
+            uint8_t i;
+            /* launch cmd thread if CB is not null */
+            pthread_mutex_lock(&my_obj->cb_lock);
+            for (i = 0; i < MM_CAMERA_STREAM_BUF_CB_MAX; i++) {
+                if(NULL != my_obj->buf_cb[i].cb) {
+                    has_cb = 1;
+                    break;
+                }
+            }
+            pthread_mutex_unlock(&my_obj->cb_lock);
+
+            if (has_cb) {
+                snprintf(my_obj->cmd_thread.threadName, THREAD_NAME_SIZE, "CAM_StrmAppData");
+                mm_camera_cmd_thread_launch(&my_obj->cmd_thread,
+                                            mm_stream_dispatch_app_data,
+                                            (void *)my_obj);
+            }
+
+            my_obj->state = MM_STREAM_STATE_ACTIVE;
+            rc = mm_stream_streamon(my_obj);
+            if (0 != rc) {
+                /* failed stream on, need to release cmd thread if it's launched */
+                if (has_cb) {
+                    mm_camera_cmd_thread_release(&my_obj->cmd_thread);
+                }
+                my_obj->state = MM_STREAM_STATE_REG;
+                break;
+            }
+        }
+        break;
+    case MM_STREAM_EVT_SET_PARM:
+        {
+            mm_evt_paylod_set_get_stream_parms_t *payload =
+                (mm_evt_paylod_set_get_stream_parms_t *)in_val;
+            rc = mm_stream_set_parm(my_obj, payload->parms);
+        }
+        break;
+    case MM_STREAM_EVT_GET_PARM:
+        {
+            mm_evt_paylod_set_get_stream_parms_t *payload =
+                (mm_evt_paylod_set_get_stream_parms_t *)in_val;
+            rc = mm_stream_get_parm(my_obj, payload->parms);
+        }
+        break;
+    default:
+        CDBG_ERROR("%s: invalid state (%d) for evt (%d), in(%p), out(%p)",
+                   __func__, my_obj->state, evt, in_val, out_val);
+    }
+    CDBG("%s :X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_fsm_active
+ *
+ * DESCRIPTION: stream finite state machine function to handle event in ACTIVE
+ *              state.
+ *
+ * PARAMETERS :
+ *   @my_obj   : ptr to a stream object
+ *   @evt      : stream event to be processed
+ *   @in_val   : input event payload. Can be NULL if not needed.
+ *   @out_val  : output payload. Can be NULL if not needed.
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_fsm_active(mm_stream_t * my_obj,
+                             mm_stream_evt_type_t evt,
+                             void * in_val,
+                             void * out_val)
+{
+    int32_t rc = 0;
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+    switch(evt) {
+    case MM_STREAM_EVT_QBUF:
+        rc = mm_stream_buf_done(my_obj, (mm_camera_buf_def_t *)in_val);
+        break;
+    case MM_STREAM_EVT_GET_QUEUED_BUF_COUNT:
+        rc = mm_stream_get_queued_buf_count(my_obj);
+        break;
+    case MM_STREAM_EVT_STOP:
+        {
+            uint8_t has_cb = 0;
+            uint8_t i;
+            rc = mm_stream_streamoff(my_obj);
+
+            pthread_mutex_lock(&my_obj->cb_lock);
+            for (i = 0; i < MM_CAMERA_STREAM_BUF_CB_MAX; i++) {
+                if(NULL != my_obj->buf_cb[i].cb) {
+                    has_cb = 1;
+                    break;
+                }
+            }
+            pthread_mutex_unlock(&my_obj->cb_lock);
+
+            if (has_cb) {
+                mm_camera_cmd_thread_release(&my_obj->cmd_thread);
+            }
+            my_obj->state = MM_STREAM_STATE_REG;
+        }
+        break;
+    case MM_STREAM_EVT_SET_PARM:
+        {
+            mm_evt_paylod_set_get_stream_parms_t *payload =
+                (mm_evt_paylod_set_get_stream_parms_t *)in_val;
+            rc = mm_stream_set_parm(my_obj, payload->parms);
+        }
+        break;
+    case MM_STREAM_EVT_GET_PARM:
+        {
+            mm_evt_paylod_set_get_stream_parms_t *payload =
+                (mm_evt_paylod_set_get_stream_parms_t *)in_val;
+            rc = mm_stream_get_parm(my_obj, payload->parms);
+        }
+        break;
+    case MM_STREAM_EVT_DO_ACTION:
+        rc = mm_stream_do_action(my_obj, in_val);
+        break;
+    default:
+        CDBG_ERROR("%s: invalid state (%d) for evt (%d), in(%p), out(%p)",
+                   __func__, my_obj->state, evt, in_val, out_val);
+    }
+    CDBG("%s :X rc = %d", __func__, rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_config
+ *
+ * DESCRIPTION: configure a stream
+ *
+ * PARAMETERS :
+ *   @my_obj       : stream object
+ *   @config       : stream configuration
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_config(mm_stream_t *my_obj,
+                         mm_camera_stream_config_t *config)
+{
+    int32_t rc = 0;
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+    my_obj->stream_info = config->stream_info;
+    my_obj->buf_num = (uint8_t) config->stream_info->num_bufs;
+    my_obj->mem_vtbl = config->mem_vtbl;
+    my_obj->padding_info = config->padding_info;
+    /* cb through intf is always placed at idx 0 of buf_cb */
+    my_obj->buf_cb[0].cb = config->stream_cb;
+    my_obj->buf_cb[0].user_data = config->userdata;
+    my_obj->buf_cb[0].cb_count = -1; /* infinite by default */
+
+    rc = mm_stream_sync_info(my_obj);
+    if (rc == 0) {
+        rc = mm_stream_set_fmt(my_obj);
+    }
+    return rc;
+}
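+
+/* A sketch of the fields a caller is expected to fill in before issuing
+ * MM_STREAM_EVT_SET_FMT; only the fields consumed above are shown, and the
+ * right-hand-side names are placeholders:
+ *
+ *     mm_camera_stream_config_t cfg;
+ *     memset(&cfg, 0, sizeof(cfg));
+ *     cfg.stream_info  = mapped_stream_info;  // shared cam_stream_info_t buffer
+ *     cfg.mem_vtbl     = app_mem_ops;         // buffer alloc / cache-invalidate callbacks
+ *     cfg.padding_info = padding_from_caps;
+ *     cfg.stream_cb    = my_frame_cb;         // optional per-frame data callback
+ *     cfg.userdata     = my_ctx;
+ */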
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_release
+ *
+ * DESCRIPTION: release a stream resource
+ *
+ * PARAMETERS :
+ *   @my_obj       : stream object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_release(mm_stream_t *my_obj)
+{
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+
+    /* close fd */
+    if(my_obj->fd >= 0)
+    {
+        close(my_obj->fd);
+    }
+
+    /* destroy mutex */
+    pthread_mutex_destroy(&my_obj->buf_lock);
+    pthread_mutex_destroy(&my_obj->cb_lock);
+
+    /* reset stream obj */
+    memset(my_obj, 0, sizeof(mm_stream_t));
+    my_obj->fd = -1;
+
+    return 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_streamon
+ *
+ * DESCRIPTION: stream on a stream by sending a v4l2 request to the kernel
+ *
+ * PARAMETERS :
+ *   @my_obj       : stream object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_streamon(mm_stream_t *my_obj)
+{
+    int32_t rc;
+    enum v4l2_buf_type buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+
+    rc = ioctl(my_obj->fd, VIDIOC_STREAMON, &buf_type);
+    if (rc < 0) {
+        CDBG_ERROR("%s: ioctl VIDIOC_STREAMON failed: rc=%d\n",
+                   __func__, rc);
+        /* remove fd from data poll thread in case of failure */
+        mm_camera_poll_thread_del_poll_fd(&my_obj->ch_obj->poll_thread[0], my_obj->my_hdl, mm_camera_sync_call);
+    }
+    CDBG("%s :X rc = %d",__func__,rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_streamoff
+ *
+ * DESCRIPTION: stream off a stream by sending a v4l2 request to the kernel
+ *
+ * PARAMETERS :
+ *   @my_obj       : stream object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_streamoff(mm_stream_t *my_obj)
+{
+    int32_t rc;
+    enum v4l2_buf_type buf_type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+
+    /* step1: remove fd from data poll thread */
+    rc = mm_camera_poll_thread_del_poll_fd(&my_obj->ch_obj->poll_thread[0],
+            my_obj->my_hdl, mm_camera_sync_call);
+    if (rc < 0) {
+        /* The error might be due to async update. In this case
+         * wait for all updates to complete before proceeding. */
+        rc = mm_camera_poll_thread_commit_updates(&my_obj->ch_obj->poll_thread[0]);
+        if (rc < 0) {
+            CDBG_ERROR("%s: Poll sync failed %d",
+                 __func__, rc);
+        }
+    }
+
+    /* step2: stream off */
+    rc = ioctl(my_obj->fd, VIDIOC_STREAMOFF, &buf_type);
+    if (rc < 0) {
+        CDBG_ERROR("%s: STREAMOFF failed: %s\n",
+                __func__, strerror(errno));
+    }
+    CDBG("%s :X rc = %d",__func__,rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_write_user_buf
+ *
+ * DESCRIPTION: stage a frame buffer into the user (batch) buffer container and
+ *              queue the container back to the kernel once it is full
+ *
+ * PARAMETERS :
+ *   @my_obj       : stream object
+ *   @buf     : ptr to a struct storing buffer information
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_write_user_buf(mm_stream_t * my_obj,
+        mm_camera_buf_def_t *buf)
+{
+    int32_t rc = 0, i;
+    int32_t index = -1, count = 0;
+    struct msm_camera_user_buf_cont_t *cont_buf = NULL;
+
+    if (buf->buf_type == CAM_STREAM_BUF_TYPE_USERPTR) {
+        my_obj->buf_status[buf->buf_idx].buf_refcnt--;
+        if (0 == my_obj->buf_status[buf->buf_idx].buf_refcnt) {
+            cont_buf = (struct msm_camera_user_buf_cont_t *)my_obj->buf[buf->buf_idx].buffer;
+            cont_buf->buf_cnt = my_obj->buf[buf->buf_idx].user_buf.bufs_used;
+            for (i = 0; i < (int32_t)cont_buf->buf_cnt; i++) {
+                cont_buf->buf_idx[i] = my_obj->buf[buf->buf_idx].user_buf.buf_idx[i];
+            }
+            rc = mm_stream_qbuf(my_obj, buf);
+            if(rc < 0) {
+                CDBG_ERROR("%s: mm_camera_stream_qbuf(idx=%d) err=%d\n",
+                           __func__, buf->buf_idx, rc);
+            } else {
+                for (i = 0; i < (int32_t)cont_buf->buf_cnt; i++) {
+                    my_obj->buf[buf->buf_idx].user_buf.buf_idx[i] = -1;
+                }
+                my_obj->buf_status[buf->buf_idx].in_kernel = 1;
+                my_obj->buf[buf->buf_idx].user_buf.buf_in_use = 1;
+            }
+        } else {
+            CDBG("<DEBUG> : ref count pending count :%d idx = %d",
+                 my_obj->buf_status[buf->buf_idx].buf_refcnt, buf->buf_idx);
+        }
+        return rc;
+    }
+
+    if ((my_obj->cur_buf_idx < 0)
+            || (my_obj->cur_buf_idx >= my_obj->buf_num)) {
+        for (i = 0; i < my_obj->buf_num; i++) {
+            if ((my_obj->buf_status[i].in_kernel)
+                    || (my_obj->buf[i].user_buf.buf_in_use)) {
+                continue;
+            }
+
+            my_obj->cur_buf_idx = index = i;
+            break;
+        }
+    } else {
+        index = my_obj->cur_buf_idx;
+    }
+
+    if (index == -1) {
+        CDBG_ERROR("%s: No Free batch buffer", __func__);
+        rc = -1;
+        return rc;
+    }
+
+    //Insert buffer into the batch structure at the next staged slot.
+    count = my_obj->cur_bufs_staged;
+    my_obj->buf[index].user_buf.buf_idx[count] = buf->buf_idx;
+    my_obj->cur_bufs_staged++;
+
+    CDBG("%s index = %d filled = %d used = %d", __func__,
+            index,
+            my_obj->cur_bufs_staged,
+            my_obj->buf[index].user_buf.bufs_used);
+
+    if (my_obj->cur_bufs_staged
+            == my_obj->buf[index].user_buf.bufs_used){
+        my_obj->buf_status[index].buf_refcnt--;
+        if (0 == my_obj->buf_status[index].buf_refcnt) {
+            cont_buf = (struct msm_camera_user_buf_cont_t *)my_obj->buf[index].buffer;
+            cont_buf->buf_cnt = my_obj->buf[index].user_buf.bufs_used;
+            for (i = 0; i < (int32_t)cont_buf->buf_cnt; i++) {
+                cont_buf->buf_idx[i] = my_obj->buf[index].user_buf.buf_idx[i];
+            }
+            rc = mm_stream_qbuf(my_obj, &my_obj->buf[index]);
+            if(rc < 0) {
+                CDBG_ERROR("%s: mm_camera_stream_qbuf(idx=%d) err=%d\n",
+                           __func__, index, rc);
+            } else {
+                for (i = 0; i < (int32_t)cont_buf->buf_cnt; i++) {
+                    my_obj->buf[index].user_buf.buf_idx[i] = -1;
+                }
+                my_obj->buf_status[index].in_kernel = 1;
+                my_obj->buf[index].user_buf.buf_in_use = 1;
+                my_obj->cur_bufs_staged = 0;
+                my_obj->cur_buf_idx = -1;
+            }
+        }else{
+            CDBG("<DEBUG> : ref count pending count :%d idx = %d",
+                 my_obj->buf_status[index].buf_refcnt, index);
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_read_user_buf
+ *
+ * DESCRIPTION: fill per-frame information (frame index, timestamp) for a
+ *              dequeued user (batch) buffer
+ *
+ * PARAMETERS :
+ *   @my_obj       : stream object
+ *   @buf_info     : ptr to a struct storing buffer information
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_read_user_buf(mm_stream_t * my_obj,
+        mm_camera_buf_info_t* buf_info)
+{
+    int32_t rc = 0, i;
+    mm_camera_buf_def_t *stream_buf  = NULL;
+    struct msm_camera_user_buf_cont_t *user_buf = NULL;
+    nsecs_t interval_nsec = 0, frame_ts = 0, timeStamp = 0;
+    int ts_delta = 0;
+    uint32_t frameID = 0;
+
+    user_buf = (struct msm_camera_user_buf_cont_t *)buf_info->buf->buffer;
+
+    if(user_buf != my_obj->buf[buf_info->buf->buf_idx].buffer) {
+        CDBG_ERROR("%s : Buffer modified. ERROR",__func__);
+        rc = -1;
+        return rc;
+    }
+
+    if (buf_info->buf->frame_idx == 1) {
+        frameID = buf_info->buf->frame_idx;
+    }else {
+        frameID = (buf_info->buf->frame_idx - 1) * user_buf->buf_cnt;
+    }
+
+    timeStamp = (nsecs_t)(buf_info->buf->ts.tv_sec) *
+            1000000000LL + buf_info->buf->ts.tv_nsec;
+
+    if (timeStamp <= my_obj->prev_timestamp) {
+        CDBG_ERROR("%s: TimeStamp received less than expected", __func__);
+        mm_stream_qbuf(my_obj, buf_info->buf);
+        return rc;
+    } else if (my_obj->prev_timestamp == 0
+            || (my_obj->prev_frameID != buf_info->buf->frame_idx + 1)) {
+        /* For the first frame, or in case a batch was dropped */
+        interval_nsec = ((my_obj->stream_info->user_buf_info.frameInterval) * 1000000);
+        my_obj->prev_timestamp = (timeStamp - (nsecs_t)(user_buf->buf_cnt * interval_nsec));
+    } else {
+        ts_delta = timeStamp - my_obj->prev_timestamp;
+        interval_nsec = (nsecs_t)(ts_delta / user_buf->buf_cnt);
+        CDBG("%s: Timestamp delta = %d timestamp = %lld",__func__, ts_delta, timeStamp);
+    }
+
+    for (i = 0; i < (int32_t)user_buf->buf_cnt; i++) {
+        buf_info->buf->user_buf.buf_idx[i] = user_buf->buf_idx[i];
+        stream_buf = &my_obj->plane_buf[user_buf->buf_idx[i]];
+        stream_buf->frame_idx = frameID + i;
+
+        frame_ts  = (i * interval_nsec) + my_obj->prev_timestamp;
+
+        stream_buf->ts.tv_sec  = (frame_ts / 1000000000LL);
+        stream_buf->ts.tv_nsec = (frame_ts - (stream_buf->ts.tv_sec * 1000000000LL));
+        stream_buf->is_uv_subsampled = buf_info->buf->is_uv_subsampled;
+
+        CDBG("%s: buf_index %d, frame_idx %d, stream type %d, timestamp = %lld",
+                __func__, stream_buf->buf_idx, stream_buf->frame_idx,
+                my_obj->stream_info->stream_type, frame_ts);
+    }
+
+    buf_info->buf->ts.tv_sec  = (my_obj->prev_timestamp / 1000000000LL);
+    buf_info->buf->ts.tv_nsec = (my_obj->prev_timestamp -
+            (buf_info->buf->ts.tv_sec * 1000000000LL));
+
+    buf_info->buf->user_buf.bufs_used = user_buf->buf_cnt;
+    buf_info->buf->user_buf.buf_in_use = 1;
+
+    my_obj->prev_timestamp = timeStamp;
+    my_obj->prev_frameID = buf_info->buf->frame_idx;
+
+    CDBG("%s :X rc = %d",__func__,rc);
+    return rc;
+}
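+
+/* Worked example of the interpolation above, with a batch of buf_cnt = 4 frames,
+ * prev_timestamp = 100 ms and a new container timestamp of 233 ms (the code works
+ * in nanoseconds):
+ *
+ *     ts_delta      = 233 ms - 100 ms = 133 ms
+ *     interval_nsec = 133 ms / 4      = 33.25 ms
+ *     frame[i].ts   = 100 ms + i * 33.25 ms        for i = 0..3
+ *
+ * so the four frames are stamped 100, 133.25, 166.5 and 199.75 ms, and
+ * prev_timestamp then advances to 233 ms for the next batch.
+ */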
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_read_msm_frame
+ *
+ * DESCRIPTION: dequeue a stream buffer from kernel queue
+ *
+ * PARAMETERS :
+ *   @my_obj       : stream object
+ *   @buf_info     : ptr to a struct storing buffer information
+ *   @num_planes   : number of planes in the buffer
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_read_msm_frame(mm_stream_t * my_obj,
+                                 mm_camera_buf_info_t* buf_info,
+                                 uint8_t num_planes)
+{
+    int32_t rc = 0;
+    struct v4l2_buffer vb;
+    struct v4l2_plane planes[VIDEO_MAX_PLANES];
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+
+    memset(&vb,  0,  sizeof(vb));
+    vb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+    vb.memory = V4L2_MEMORY_USERPTR;
+    vb.m.planes = &planes[0];
+    vb.length = num_planes;
+
+    rc = ioctl(my_obj->fd, VIDIOC_DQBUF, &vb);
+    if (0 > rc) {
+        CDBG_ERROR("%s: VIDIOC_DQBUF ioctl call failed on stream type %d (rc=%d): %s",
+            __func__, my_obj->stream_info->stream_type, rc, strerror(errno));
+    } else {
+        pthread_mutex_lock(&my_obj->buf_lock);
+        my_obj->queued_buffer_count--;
+        if (0 == my_obj->queued_buffer_count) {
+            CDBG_HIGH("%s: Stopping poll on stream %p type: %d", __func__,
+                my_obj, my_obj->stream_info->stream_type);
+            mm_camera_poll_thread_del_poll_fd(&my_obj->ch_obj->poll_thread[0],
+                my_obj->my_hdl, mm_camera_async_call);
+            CDBG_HIGH("%s: Stopped poll on stream %p type: %d", __func__,
+                my_obj, my_obj->stream_info->stream_type);
+        }
+        uint32_t idx = vb.index;
+        buf_info->buf = &my_obj->buf[idx];
+        buf_info->frame_idx = vb.sequence;
+        buf_info->stream_id = my_obj->my_hdl;
+
+        buf_info->buf->stream_id = my_obj->my_hdl;
+        buf_info->buf->buf_idx = idx;
+        buf_info->buf->frame_idx = vb.sequence;
+        buf_info->buf->ts.tv_sec  = vb.timestamp.tv_sec;
+        buf_info->buf->ts.tv_nsec = vb.timestamp.tv_usec * 1000;
+        buf_info->buf->flags = vb.flags;
+
+        CDBG_HIGH("%s: VIDIOC_DQBUF buf_index %d, frame_idx %d, stream type %d, rc %d,"
+                "queued: %d, buf_type = %d flags = %d",
+            __func__, vb.index, buf_info->buf->frame_idx,
+            my_obj->stream_info->stream_type, rc,
+            my_obj->queued_buffer_count, buf_info->buf->buf_type,
+            buf_info->buf->flags);
+
+        buf_info->buf->is_uv_subsampled =
+            (vb.reserved == V4L2_PIX_FMT_NV14 || vb.reserved == V4L2_PIX_FMT_NV41);
+
+        if(buf_info->buf->buf_type == CAM_STREAM_BUF_TYPE_USERPTR) {
+            mm_stream_read_user_buf(my_obj, buf_info);
+        }
+        pthread_mutex_unlock(&my_obj->buf_lock);
+
+        if ( NULL != my_obj->mem_vtbl.clean_invalidate_buf ) {
+            rc = my_obj->mem_vtbl.clean_invalidate_buf(idx,
+                my_obj->mem_vtbl.user_data);
+            if (0 > rc) {
+                CDBG_ERROR("%s: Clean invalidate cache failed on buffer index: %d",
+                    __func__, idx);
+            }
+        } else {
+            CDBG_ERROR("%s: Clean invalidate cache op not supported", __func__);
+        }
+    }
+
+    CDBG("%s :X rc = %d",__func__,rc);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_set_parm
+ *
+ * DESCRIPTION: set parameters per stream
+ *
+ * PARAMETERS :
+ *   @my_obj       : stream object
+ *   @in_value     : ptr to a param struct to be set to server
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : Assume the parms struct buf is already mapped to server via
+ *              domain socket. Corresponding fields of parameters to be set
+ *              are already filled in by upper layer caller.
+ *==========================================================================*/
+int32_t mm_stream_set_parm(mm_stream_t *my_obj,
+                           cam_stream_parm_buffer_t *in_value)
+{
+    int32_t rc = -1;
+    int32_t value = 0;
+    if (in_value != NULL) {
+        rc = mm_camera_util_s_ctrl(my_obj->fd, CAM_PRIV_STREAM_PARM, &value);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_get_parm
+ *
+ * DESCRIPTION: get parameters per stream
+ *
+ * PARAMETERS :
+ *   @my_obj       : stream object
+ *   @in_value     : ptr to a param struct to be retrieved from the server
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : Assume the parms struct buf is already mapped to server via
+ *              domain socket. Corresponding fields of the parameters to be
+ *              retrieved are already filled in by the upper layer caller.
+ *==========================================================================*/
+int32_t mm_stream_get_parm(mm_stream_t *my_obj,
+                           cam_stream_parm_buffer_t *in_value)
+{
+    int32_t rc = -1;
+    int32_t value = 0;
+    if (in_value != NULL) {
+        rc = mm_camera_util_g_ctrl(my_obj->fd, CAM_PRIV_STREAM_PARM, &value);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_do_action
+ *
+ * DESCRIPTION: request server to perform stream based actions
+ *
+ * PARAMETERS :
+ *   @my_obj       : stream object
+ *   @in_value     : ptr to a struct of actions to be performed by the server
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : Assume the action struct buf is already mapped to server via
+ *              domain socket. Corresponding fields of actions to be performed
+ *              are already filled in by upper layer caller.
+ *==========================================================================*/
+int32_t mm_stream_do_action(mm_stream_t *my_obj,
+                            void *in_value)
+{
+    int32_t rc = -1;
+    int32_t value = 0;
+    if (in_value != NULL) {
+        rc = mm_camera_util_s_ctrl(my_obj->fd, CAM_PRIV_STREAM_PARM, &value);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_set_ext_mode
+ *
+ * DESCRIPTION: set stream extended mode to server via v4l2 ioctl
+ *
+ * PARAMETERS :
+ *   @my_obj       : stream object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : Server will return a server stream id that uniquely identify
+ *              this stream on server side. Later on communication to server
+ *              per stream should use this server stream id.
+ *==========================================================================*/
+int32_t mm_stream_set_ext_mode(mm_stream_t * my_obj)
+{
+    int32_t rc = 0;
+    struct v4l2_streamparm s_parm;
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+
+    memset(&s_parm, 0, sizeof(s_parm));
+    s_parm.type =  V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+
+    rc = ioctl(my_obj->fd, VIDIOC_S_PARM, &s_parm);
+    CDBG("%s:stream fd=%d, rc=%d, extended_mode=%d\n",
+         __func__, my_obj->fd, rc, s_parm.parm.capture.extendedmode);
+    if (rc == 0) {
+        /* get server stream id */
+        my_obj->server_stream_id = s_parm.parm.capture.extendedmode;
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_qbuf
+ *
+ * DESCRIPTION: enqueue buffer back to kernel queue for future use
+ *
+ * PARAMETERS :
+ *   @my_obj       : stream object
+ *   @buf          : ptr to a struct storing buffer information
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_qbuf(mm_stream_t *my_obj, mm_camera_buf_def_t *buf)
+{
+    int32_t rc = 0;
+    uint32_t length = 0;
+    struct v4l2_buffer buffer;
+    struct v4l2_plane planes[VIDEO_MAX_PLANES];
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d, stream type = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state,
+         my_obj->stream_info->stream_type);
+
+    if (buf->buf_type == CAM_STREAM_BUF_TYPE_USERPTR) {
+        CDBG("%s: USERPTR num_buf = %d, idx = %d", __func__,
+                buf->user_buf.bufs_used, buf->buf_idx);
+        memset(&planes, 0, sizeof(planes));
+        planes[0].length = my_obj->stream_info->user_buf_info.size;
+        planes[0].m.userptr = buf->fd;
+        length = 1;
+    } else {
+        memcpy(planes, buf->planes_buf.planes, sizeof(planes));
+        length = buf->planes_buf.num_planes;
+
+        CDBG("%s:plane 0: stream_hdl=%u,fd=%d,frame idx=%d,num_planes = %u, "
+                "offset = %d, data_offset = %d\n", __func__,
+                 buf->stream_id, buf->fd, buf->frame_idx, length,
+                 buf->planes_buf.planes[0].reserved[0],
+                 buf->planes_buf.planes[0].data_offset);
+        CDBG("%s:plane 1: stream_hdl=%u,fd=%d,frame idx=%d,num_planes = %u, "
+                "offset = %d, data_offset = %d\n", __func__,
+                 buf->stream_id, buf->fd, buf->frame_idx, length,
+                 buf->planes_buf.planes[1].reserved[0],
+                 buf->planes_buf.planes[1].data_offset);
+    }
+
+    memset(&buffer, 0, sizeof(buffer));
+    buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+    buffer.memory = V4L2_MEMORY_USERPTR;
+    buffer.index = (__u32)buf->buf_idx;
+    buffer.m.planes = &planes[0];
+    buffer.length = (__u32)length;
+
+    if ( NULL != my_obj->mem_vtbl.invalidate_buf ) {
+        rc = my_obj->mem_vtbl.invalidate_buf(buffer.index,
+                                             my_obj->mem_vtbl.user_data);
+        if ( 0 > rc ) {
+            CDBG_ERROR("%s: Cache invalidate failed on buffer index: %d",
+                       __func__,
+                       buffer.index);
+            return rc;
+        }
+    } else {
+        CDBG_ERROR("%s: Cache invalidate op not added", __func__);
+    }
+
+    my_obj->queued_buffer_count++;
+    if (1 == my_obj->queued_buffer_count) {
+        /* Add fd to data poll thread */
+        CDBG_HIGH("%s: Starting poll on stream %p type: %d", __func__,
+            my_obj,my_obj->stream_info->stream_type);
+        rc = mm_camera_poll_thread_add_poll_fd(&my_obj->ch_obj->poll_thread[0],
+            my_obj->my_hdl, my_obj->fd, mm_stream_data_notify, (void*)my_obj,
+            mm_camera_async_call);
+        if (0 > rc) {
+            CDBG_ERROR("%s: Add poll on stream %p type: %d fd error (rc=%d)",
+                __func__, my_obj, my_obj->stream_info->stream_type, rc);
+        } else {
+            CDBG_HIGH("%s: Started poll on stream %p type: %d", __func__,
+                my_obj, my_obj->stream_info->stream_type);
+        }
+    }
+
+    rc = ioctl(my_obj->fd, VIDIOC_QBUF, &buffer);
+    if (0 > rc) {
+        CDBG_ERROR("%s: VIDIOC_QBUF ioctl call failed on stream type %d (rc=%d): %s",
+            __func__, my_obj->stream_info->stream_type, rc, strerror(errno));
+        my_obj->queued_buffer_count--;
+        if (0 == my_obj->queued_buffer_count) {
+            /* Remove fd from data poll in case of failing
+             * first buffer queuing attempt */
+            CDBG_HIGH("%s: Stopping poll on stream %p type: %d", __func__,
+                my_obj, my_obj->stream_info->stream_type);
+            mm_camera_poll_thread_del_poll_fd(&my_obj->ch_obj->poll_thread[0],
+                my_obj->my_hdl, mm_camera_async_call);
+            CDBG_HIGH("%s: Stopped poll on stream %p type: %d", __func__,
+                my_obj, my_obj->stream_info->stream_type);
+        }
+    } else {
+        CDBG_HIGH("%s: VIDIOC_QBUF buf_index %d, stream type %d, rc %d, queued: %d", __func__,
+            buffer.index, my_obj->stream_info->stream_type, rc, my_obj->queued_buffer_count);
+    }
+
+    return rc;
+}
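+
+/* The queued_buffer_count bookkeeping above, together with the matching
+ * decrement in mm_stream_read_msm_frame(), gives the poll-fd lifecycle:
+ *
+ *     queued count 0 -> 1 : fd added to the channel poll thread (start polling)
+ *     queued count 1 -> 0 : fd removed from the poll thread (stop polling)
+ *
+ * so the poll thread only watches streams that actually have buffers in the
+ * kernel, and a failed first QBUF immediately rolls the registration back.
+ */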
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_request_buf
+ *
+ * DESCRIPTION: let the kernel know the number of buffers that need to be
+ *              registered, via v4l2 ioctl.
+ *
+ * PARAMETERS :
+ *   @my_obj       : stream object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_request_buf(mm_stream_t * my_obj)
+{
+    int32_t rc = 0;
+    struct v4l2_requestbuffers bufreq;
+    uint8_t buf_num = my_obj->buf_num;
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+
+    CDBG("%s: buf_num = %d, stream type = %d",
+         __func__, buf_num, my_obj->stream_info->stream_type);
+
+    if(buf_num > MM_CAMERA_MAX_NUM_FRAMES) {
+        CDBG_ERROR("%s: buf num %d > max limit %d\n",
+                   __func__, buf_num, MM_CAMERA_MAX_NUM_FRAMES);
+        return -1;
+    }
+
+    memset(&bufreq, 0, sizeof(bufreq));
+    bufreq.count = buf_num;
+    bufreq.type  = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+    bufreq.memory = V4L2_MEMORY_USERPTR;
+    rc = ioctl(my_obj->fd, VIDIOC_REQBUFS, &bufreq);
+    if (rc < 0) {
+      CDBG_ERROR("%s: fd=%d, ioctl VIDIOC_REQBUFS failed: rc=%d\n",
+           __func__, my_obj->fd, rc);
+    }
+
+    CDBG("%s :X rc = %d",__func__,rc);
+    return rc;
+}
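+
+/* For reference, the v4l2 ioctls issued against my_obj->fd over a stream's
+ * lifetime follow the standard multi-planar USERPTR ordering:
+ *
+ *     VIDIOC_S_PARM    mm_stream_set_ext_mode()   -> returns server stream id
+ *     VIDIOC_S_FMT     mm_stream_set_fmt()        -> called from mm_stream_config()
+ *     VIDIOC_REQBUFS   mm_stream_request_buf()    -> count = buf_num, USERPTR
+ *     VIDIOC_QBUF      mm_stream_qbuf()           -> once per buffer
+ *     VIDIOC_STREAMON  mm_stream_streamon()
+ *     VIDIOC_DQBUF     mm_stream_read_msm_frame() -> steady-state frame loop
+ *     VIDIOC_STREAMOFF mm_stream_streamoff()
+ */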
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_map_buf
+ *
+ * DESCRIPTION: mapping stream buffer via domain socket to server
+ *
+ * PARAMETERS :
+ *   @my_obj       : stream object
+ *   @buf_type     : type of buffer to be mapped. could be following values:
+ *                   CAM_MAPPING_BUF_TYPE_STREAM_BUF
+ *                   CAM_MAPPING_BUF_TYPE_STREAM_INFO
+ *                   CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF
+ *   @frame_idx    : index of buffer within the stream buffers, only valid if
+ *                   buf_type is CAM_MAPPING_BUF_TYPE_STREAM_BUF or
+ *                   CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF
+ *   @plane_idx    : plane index. If all planes share the same fd,
+ *                   plane_idx = -1; otherwise, plane_idx is the
+ *                   index of the plane (0..num_of_planes)
+ *   @fd           : file descriptor of the buffer
+ *   @size         : size of the buffer
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_map_buf(mm_stream_t * my_obj,
+                          uint8_t buf_type,
+                          uint32_t frame_idx,
+                          int32_t plane_idx,
+                          int32_t fd,
+                          size_t size)
+{
+    if (NULL == my_obj || NULL == my_obj->ch_obj || NULL == my_obj->ch_obj->cam_obj) {
+        CDBG_ERROR("%s: NULL obj of stream/channel/camera", __func__);
+        return -1;
+    }
+    cam_sock_packet_t packet;
+    memset(&packet, 0, sizeof(cam_sock_packet_t));
+    packet.msg_type = CAM_MAPPING_TYPE_FD_MAPPING;
+    packet.payload.buf_map.type = buf_type;
+    packet.payload.buf_map.fd = fd;
+    packet.payload.buf_map.size = size;
+    packet.payload.buf_map.stream_id = my_obj->server_stream_id;
+    packet.payload.buf_map.frame_idx = frame_idx;
+    packet.payload.buf_map.plane_idx = plane_idx;
+
+    CDBG("%s: mapping buf_type %d, stream_id %d, frame_idx %d, fd %d, size %zu",
+            __func__, buf_type, my_obj->server_stream_id, frame_idx, fd, size);
+    return mm_camera_util_sendmsg(my_obj->ch_obj->cam_obj,
+                                  &packet,
+                                  sizeof(cam_sock_packet_t),
+                                  fd);
+}
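+
+/* A minimal sketch of how stream buffers end up mapped through the map_ops table
+ * wired up in mm_stream_init_bufs() below (buf_fd[], buf_size[] and the loop are
+ * illustrative placeholders for the upper layer's allocation code):
+ *
+ *     for (i = 0; i < my_obj->buf_num; i++) {
+ *         // plane_idx = -1: all planes of this buffer share a single fd
+ *         my_obj->map_ops.map_ops(i, -1, buf_fd[i], buf_size[i],
+ *                                 CAM_MAPPING_BUF_TYPE_STREAM_BUF, (void *)my_obj);
+ *     }
+ */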
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_unmap_buf
+ *
+ * DESCRIPTION: unmapping stream buffer via domain socket to server
+ *
+ * PARAMETERS :
+ *   @my_obj       : stream object
+ *   @buf_type     : type of buffer to be unmapped. could be following values:
+ *                   CAM_MAPPING_BUF_TYPE_STREAM_BUF
+ *                   CAM_MAPPING_BUF_TYPE_STREAM_INFO
+ *                   CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF
+ *   @frame_idx    : index of buffer within the stream buffers, only valid if
+ *                   buf_type is CAM_MAPPING_BUF_TYPE_STREAM_BUF or
+ *                   CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF
+ *   @plane_idx    : plane index. If all planes share the same fd,
+ *                   plane_idx = -1; otherwise, plane_idx is the
+ *                   index of the plane (0..num_of_planes)
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_unmap_buf(mm_stream_t * my_obj,
+                            uint8_t buf_type,
+                            uint32_t frame_idx,
+                            int32_t plane_idx)
+{
+    if (NULL == my_obj || NULL == my_obj->ch_obj || NULL == my_obj->ch_obj->cam_obj) {
+        CDBG_ERROR("%s: NULL obj of stream/channel/camera", __func__);
+        return -1;
+    }
+    cam_sock_packet_t packet;
+    memset(&packet, 0, sizeof(cam_sock_packet_t));
+    packet.msg_type = CAM_MAPPING_TYPE_FD_UNMAPPING;
+    packet.payload.buf_unmap.type = buf_type;
+    packet.payload.buf_unmap.stream_id = my_obj->server_stream_id;
+    packet.payload.buf_unmap.frame_idx = frame_idx;
+    packet.payload.buf_unmap.plane_idx = plane_idx;
+    return mm_camera_util_sendmsg(my_obj->ch_obj->cam_obj,
+                                  &packet,
+                                  sizeof(cam_sock_packet_t),
+                                  -1);
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_map_buf_ops
+ *
+ * DESCRIPTION: ops for mapping stream buffers via domain socket to the server.
+ *              This function is passed to the upper layer as part of the ops
+ *              table and is used when allocating stream buffers, to map them
+ *              to the server via the domain socket.
+ *
+ * PARAMETERS :
+ *   @frame_idx    : index of buffer within the stream buffers, only valid if
+ *                   buf_type is CAM_MAPPING_BUF_TYPE_STREAM_BUF or
+ *                   CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF
+ *   @plane_idx    : plane index. If all planes share the same fd,
+ *                   plane_idx = -1; otherwise, plane_idx is the
+ *                   index of the plane (0 .. num_of_planes - 1)
+ *   @fd           : file descriptor of the buffer
+ *   @size         : size of the buffer
+ *   @userdata     : user data ptr (stream object)
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_stream_map_buf_ops(uint32_t frame_idx,
+                                     int32_t plane_idx,
+                                     int fd,
+                                     size_t size,
+                                     cam_mapping_buf_type type,
+                                     void *userdata)
+{
+    mm_stream_t *my_obj = (mm_stream_t *)userdata;
+    return mm_stream_map_buf(my_obj,
+                             type,
+                             frame_idx, plane_idx, fd, size);
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_unmap_buf_ops
+ *
+ * DESCRIPTION: ops wrapper for unmapping a stream buffer from the server via
+ *              domain socket. This function is passed to the upper layer as
+ *              part of the ops table, so the upper layer can unmap buffers
+ *              from the server when releasing stream buffers.
+ *
+ * PARAMETERS :
+ *   @frame_idx    : index of buffer within the stream buffers, only valid if
+ *                   buf_type is CAM_MAPPING_BUF_TYPE_STREAM_BUF or
+ *                   CAM_MAPPING_BUF_TYPE_OFFLINE_INPUT_BUF
+ *   @plane_idx    : plane index. If all planes share the same fd,
+ *                   plane_idx = -1; otherwise, plane_idx is the
+ *                   index of the plane (0 .. num_of_planes - 1)
+ *   @userdata     : user data ptr (stream object)
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_stream_unmap_buf_ops(uint32_t frame_idx,
+                                       int32_t plane_idx,
+                                       cam_mapping_buf_type type,
+                                       void *userdata)
+{
+    mm_stream_t *my_obj = (mm_stream_t *)userdata;
+    return mm_stream_unmap_buf(my_obj,
+                               type,
+                               frame_idx,
+                               plane_idx);
+}
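+
+/*
+ * Usage sketch for the ops pair above: an upper-layer get_bufs()
+ * implementation typically maps every buffer it allocates back to the daemon
+ * through this table and unmaps it again from put_bufs(). The bufs[i] field
+ * names below are placeholders for whatever the allocator tracks:
+ *
+ *   for (i = 0; i < num_bufs; i++) {
+ *       ops_tbl->map_ops(i,                  // frame_idx
+ *                        -1,                 // one fd shared by all planes
+ *                        bufs[i].fd,
+ *                        bufs[i].frame_len,
+ *                        CAM_MAPPING_BUF_TYPE_STREAM_BUF,
+ *                        ops_tbl->userdata);
+ *   }
+ *
+ *   // and later, for each buffer being released:
+ *   ops_tbl->unmap_ops(i, -1, CAM_MAPPING_BUF_TYPE_STREAM_BUF,
+ *                      ops_tbl->userdata);
+ */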
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_init_bufs
+ *
+ * DESCRIPTION: initialize the stream buffers. This function requests the
+ *              needed buffers from the upper layer through the mem ops table
+ *              passed during the configuration stage.
+ *
+ * PARAMETERS :
+ *   @my_obj  : stream object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_init_bufs(mm_stream_t * my_obj)
+{
+    int32_t i, rc = 0;
+    uint8_t *reg_flags = NULL;
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+
+    /* deinit bufs if they are not NULL */
+    if (NULL != my_obj->buf) {
+        mm_stream_deinit_bufs(my_obj);
+    }
+
+    my_obj->map_ops.map_ops = mm_stream_map_buf_ops;
+    my_obj->map_ops.unmap_ops = mm_stream_unmap_buf_ops;
+    my_obj->map_ops.userdata = my_obj;
+
+    rc = my_obj->mem_vtbl.get_bufs(&my_obj->frame_offset,
+                                   &my_obj->buf_num,
+                                   &reg_flags,
+                                   &my_obj->buf,
+                                   &my_obj->map_ops,
+                                   my_obj->mem_vtbl.user_data);
+
+    if (0 != rc) {
+        CDBG_ERROR("%s: Error get buf, rc = %d\n", __func__, rc);
+        return rc;
+    }
+
+    my_obj->buf_status =
+        (mm_stream_buf_status_t *)malloc(sizeof(mm_stream_buf_status_t) * my_obj->buf_num);
+
+    if (NULL == my_obj->buf_status) {
+        CDBG_ERROR("%s: No memory for buf_status", __func__);
+        mm_stream_deinit_bufs(my_obj);
+        free(reg_flags);
+        return -1;
+    }
+
+    memset(my_obj->buf_status, 0, sizeof(mm_stream_buf_status_t) * my_obj->buf_num);
+    for (i = 0; i < my_obj->buf_num; i++) {
+        my_obj->buf_status[i].initial_reg_flag = reg_flags[i];
+        my_obj->buf[i].stream_id = my_obj->my_hdl;
+        my_obj->buf[i].stream_type = my_obj->stream_info->stream_type;
+
+        if (my_obj->buf[i].buf_type == CAM_STREAM_BUF_TYPE_USERPTR) {
+            my_obj->buf[i].user_buf.bufs_used =
+                    (int8_t)my_obj->stream_info->user_buf_info.frame_buf_cnt;
+            my_obj->buf[i].user_buf.buf_in_use = reg_flags[i];
+        }
+    }
+
+    if (my_obj->stream_info->streaming_mode == CAM_STREAMING_MODE_BATCH) {
+        my_obj->plane_buf = my_obj->buf[0].user_buf.plane_buf;
+        if (my_obj->plane_buf != NULL) {
+            my_obj->plane_buf_num =
+                    my_obj->buf_num *
+                    my_obj->stream_info->user_buf_info.frame_buf_cnt;
+            for (i = 0; i < my_obj->plane_buf_num; i++) {
+                my_obj->plane_buf[i].stream_id = my_obj->my_hdl;
+                my_obj->plane_buf[i].stream_type = my_obj->stream_info->stream_type;
+            }
+        }
+        my_obj->cur_bufs_staged = 0;
+        my_obj->cur_buf_idx = -1;
+    }
+
+    free(reg_flags);
+    reg_flags = NULL;
+
+    /* update stream info with the number of stream buffers */
+    my_obj->stream_info->num_bufs = my_obj->buf_num;
+
+    return rc;
+}
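+
+/*
+ * Note on reg_flags: each flag returned by get_bufs() is copied into
+ * buf_status[i].initial_reg_flag above and decides later, in
+ * mm_stream_reg_buf(), whether the buffer is queued to the kernel at stream
+ * start or kept referenced by the upper layer.
+ */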
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_deinit_bufs
+ *
+ * DESCRIPTION: return stream buffers to upper layer through the mem ops table
+ *              passed during configuration stage.
+ *
+ * PARAMETERS :
+ *   @my_obj  : stream object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_deinit_bufs(mm_stream_t * my_obj)
+{
+    int32_t rc = 0;
+
+    mm_camera_map_unmap_ops_tbl_t ops_tbl;
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+
+    if (NULL == my_obj->buf) {
+        CDBG("%s: Buf is NULL, no need to deinit", __func__);
+        return rc;
+    }
+
+    /* release bufs */
+    ops_tbl.map_ops = mm_stream_map_buf_ops;
+    ops_tbl.unmap_ops = mm_stream_unmap_buf_ops;
+    ops_tbl.userdata = my_obj;
+
+    rc = my_obj->mem_vtbl.put_bufs(&ops_tbl,
+                                   my_obj->mem_vtbl.user_data);
+
+    if (my_obj->plane_buf != NULL) {
+        free(my_obj->plane_buf);
+        my_obj->plane_buf = NULL;
+    }
+
+    free(my_obj->buf);
+    my_obj->buf = NULL;
+    if (my_obj->buf_status != NULL) {
+        free(my_obj->buf_status);
+        my_obj->buf_status = NULL;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_reg_buf
+ *
+ * DESCRIPTION: register buffers with the kernel by calling the V4L2 QBUF
+ *              ioctl for each buffer in the stream
+ *
+ * PARAMETERS :
+ *   @my_obj  : stream object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_reg_buf(mm_stream_t * my_obj)
+{
+    int32_t rc = 0;
+    uint8_t i;
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+
+    rc = mm_stream_request_buf(my_obj);
+    if (rc != 0) {
+        return rc;
+    }
+
+    pthread_mutex_lock(&my_obj->buf_lock);
+    my_obj->queued_buffer_count = 0;
+    for(i = 0; i < my_obj->buf_num; i++){
+        /* check if need to qbuf initially */
+        if (my_obj->buf_status[i].initial_reg_flag) {
+            rc = mm_stream_qbuf(my_obj, &my_obj->buf[i]);
+            if (rc != 0) {
+                CDBG_ERROR("%s: VIDIOC_QBUF rc = %d\n", __func__, rc);
+                break;
+            }
+            my_obj->buf_status[i].buf_refcnt = 0;
+            my_obj->buf_status[i].in_kernel = 1;
+        } else {
+            /* the buffer is held by the upper layer and will not be queued
+             * into the kernel; add a buffer reference count instead */
+            my_obj->buf_status[i].buf_refcnt = 1;
+            my_obj->buf_status[i].in_kernel = 0;
+        }
+    }
+    pthread_mutex_unlock(&my_obj->buf_lock);
+
+    return rc;
+}
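+
+/*
+ * For reference, mm_stream_qbuf() (defined elsewhere in this file) queues a
+ * buffer with a multi-planar v4l2_buffer roughly along these lines; this is
+ * only a sketch, and the index/plane fields shown are indicative of the
+ * mm_camera buffer definition rather than an exact copy of the helper:
+ *
+ *   struct v4l2_plane planes[VIDEO_MAX_PLANES];
+ *   struct v4l2_buffer buffer;
+ *
+ *   memset(planes, 0, sizeof(planes));
+ *   memset(&buffer, 0, sizeof(buffer));
+ *   buffer.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ *   buffer.memory = V4L2_MEMORY_USERPTR;
+ *   buffer.index = buf->buf_idx;              // index within the stream
+ *   buffer.m.planes = planes;                 // per-plane userptr/length
+ *   buffer.length = (unsigned int)buf->num_planes;
+ *   rc = ioctl(my_obj->fd, VIDIOC_QBUF, &buffer);
+ */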
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_unreg_buf
+ *
+ * DESCRIPTION: unregister all stream buffers from the kernel
+ *
+ * PARAMETERS :
+ *   @my_obj  : stream object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_unreg_buf(mm_stream_t * my_obj)
+{
+    struct v4l2_requestbuffers bufreq;
+    int32_t i, rc = 0;
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+
+    /* unregister buffers from the kernel */
+    bufreq.count = 0;
+    bufreq.type  = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+    bufreq.memory = V4L2_MEMORY_USERPTR;
+    rc = ioctl(my_obj->fd, VIDIOC_REQBUFS, &bufreq);
+    if (rc < 0) {
+        CDBG_ERROR("%s: fd=%d, VIDIOC_REQBUFS failed, rc=%d\n",
+              __func__, my_obj->fd, rc);
+    }
+
+    /* reset buf reference count */
+    pthread_mutex_lock(&my_obj->buf_lock);
+    if (NULL != my_obj->buf_status) {
+        for(i = 0; i < my_obj->buf_num; i++){
+            my_obj->buf_status[i].buf_refcnt = 0;
+            my_obj->buf_status[i].in_kernel = 0;
+        }
+    }
+    pthread_mutex_unlock(&my_obj->buf_lock);
+
+    return rc;
+}
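+
+/*
+ * Standard V4L2 semantics: VIDIOC_REQBUFS with count = 0 releases all
+ * buffers previously requested for this stream from the driver, which is
+ * why the per-buffer reference counts are simply reset after the call.
+ */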
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_get_v4l2_fmt
+ *
+ * DESCRIPTION: translate camera image format into FOURCC code
+ *
+ * PARAMETERS :
+ *   @fmt     : camera image format
+ *
+ * RETURN     : FOURCC code for image format
+ *==========================================================================*/
+uint32_t mm_stream_get_v4l2_fmt(cam_format_t fmt)
+{
+    uint32_t val;
+    switch(fmt) {
+    case CAM_FORMAT_YUV_420_NV12:
+    case CAM_FORMAT_YUV_420_NV12_VENUS:
+        val = V4L2_PIX_FMT_NV12;
+        break;
+    case CAM_FORMAT_YUV_420_NV21:
+        val = V4L2_PIX_FMT_NV21;
+        break;
+    case CAM_FORMAT_BAYER_QCOM_RAW_10BPP_GBRG:
+    case CAM_FORMAT_BAYER_QCOM_RAW_10BPP_GRBG:
+    case CAM_FORMAT_BAYER_QCOM_RAW_10BPP_RGGB:
+    case CAM_FORMAT_BAYER_QCOM_RAW_10BPP_BGGR:
+        val = V4L2_PIX_FMT_SBGGR10;
+        break;
+    case CAM_FORMAT_YUV_422_NV61:
+        val = V4L2_PIX_FMT_NV61;
+        break;
+    case CAM_FORMAT_YUV_RAW_8BIT_YUYV:
+        val = V4L2_PIX_FMT_YUYV;
+        break;
+    case CAM_FORMAT_YUV_RAW_8BIT_YVYU:
+        val = V4L2_PIX_FMT_YVYU;
+        break;
+    case CAM_FORMAT_YUV_RAW_8BIT_UYVY:
+        val = V4L2_PIX_FMT_UYVY;
+        break;
+    case CAM_FORMAT_YUV_RAW_8BIT_VYUY:
+        val = V4L2_PIX_FMT_VYUY;
+        break;
+    case CAM_FORMAT_YUV_420_YV12:
+        val = V4L2_PIX_FMT_NV12;
+        break;
+    case CAM_FORMAT_YUV_422_NV16:
+        val = V4L2_PIX_FMT_NV16;
+        break;
+    default:
+        val = 0;
+        CDBG_ERROR("%s: Unknown fmt=%d", __func__, fmt);
+        break;
+    }
+    CDBG("%s: fmt=%d, val=%d", __func__, fmt, val);
+    return val;
+}
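+
+/*
+ * The values returned above are the standard V4L2 FOURCC codes from
+ * <linux/videodev2.h>, e.g. V4L2_PIX_FMT_NV12 expands to
+ * v4l2_fourcc('N', 'V', '1', '2').
+ */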
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_calc_offset_preview
+ *
+ * DESCRIPTION: calculate preview frame offset based on format and
+ *              padding information
+ *
+ * PARAMETERS :
+ *   @fmt     : image format
+ *   @dim     : image dimension
+ *   @buf_planes : [out] buffer plane information
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_calc_offset_preview(cam_format_t fmt,
+                                      cam_dimension_t *dim,
+                                      cam_stream_buf_plane_info_t *buf_planes)
+{
+    int32_t rc = 0;
+    int stride = 0, scanline = 0;
+
+    switch (fmt) {
+    case CAM_FORMAT_YUV_420_NV12:
+    case CAM_FORMAT_YUV_420_NV21:
+        /* 2 planes: Y + CbCr */
+        buf_planes->plane_info.num_planes = 2;
+
+        stride = PAD_TO_SIZE(dim->width, CAM_PAD_TO_32);
+        scanline = PAD_TO_SIZE(dim->height, CAM_PAD_TO_2);
+
+        buf_planes->plane_info.mp[0].offset = 0;
+        buf_planes->plane_info.mp[0].len = (uint32_t)(stride * scanline);
+        buf_planes->plane_info.mp[0].offset_x = 0;
+        buf_planes->plane_info.mp[0].offset_y = 0;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = dim->width;
+        buf_planes->plane_info.mp[0].height = dim->height;
+
+        stride = PAD_TO_SIZE(dim->width, CAM_PAD_TO_32);
+        scanline = PAD_TO_SIZE(dim->height / 2, CAM_PAD_TO_2);
+        buf_planes->plane_info.mp[1].offset = 0;
+        buf_planes->plane_info.mp[1].len =
+            (uint32_t)(stride * scanline);
+        buf_planes->plane_info.mp[1].offset_x = 0;
+        buf_planes->plane_info.mp[1].offset_y = 0;
+        buf_planes->plane_info.mp[1].stride = stride;
+        buf_planes->plane_info.mp[1].scanline = scanline;
+        buf_planes->plane_info.mp[1].width = dim->width;
+        buf_planes->plane_info.mp[1].height = dim->height / 2;
+
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len +
+                        buf_planes->plane_info.mp[1].len,
+                        CAM_PAD_TO_4K);
+        break;
+    case CAM_FORMAT_YUV_420_NV21_ADRENO:
+        /* 2 planes: Y + CbCr */
+        buf_planes->plane_info.num_planes = 2;
+
+        stride = PAD_TO_SIZE(dim->width, CAM_PAD_TO_32);
+        scanline = PAD_TO_SIZE(dim->height, CAM_PAD_TO_32);
+        buf_planes->plane_info.mp[0].offset = 0;
+        buf_planes->plane_info.mp[0].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline), CAM_PAD_TO_4K);
+        buf_planes->plane_info.mp[0].offset_x = 0;
+        buf_planes->plane_info.mp[0].offset_y = 0;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = dim->width;
+        buf_planes->plane_info.mp[0].height = dim->height;
+
+        stride = PAD_TO_SIZE(dim->width / 2, CAM_PAD_TO_32) * 2;
+        scanline = PAD_TO_SIZE(dim->height / 2, CAM_PAD_TO_32);
+        buf_planes->plane_info.mp[1].offset = 0;
+        buf_planes->plane_info.mp[1].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline), CAM_PAD_TO_4K);
+        buf_planes->plane_info.mp[1].offset_x = 0;
+        buf_planes->plane_info.mp[1].offset_y = 0;
+        buf_planes->plane_info.mp[1].stride = stride;
+        buf_planes->plane_info.mp[1].scanline = scanline;
+        buf_planes->plane_info.mp[1].width = dim->width;
+        buf_planes->plane_info.mp[1].height = dim->height / 2;
+
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len +
+                        buf_planes->plane_info.mp[1].len,
+                        CAM_PAD_TO_4K);
+        break;
+    case CAM_FORMAT_YUV_420_YV12:
+        /* 3 planes: Y + Cr + Cb */
+        buf_planes->plane_info.num_planes = 3;
+
+        stride = PAD_TO_SIZE(dim->width, CAM_PAD_TO_16);
+        scanline = PAD_TO_SIZE(dim->height, CAM_PAD_TO_2);
+        buf_planes->plane_info.mp[0].offset = 0;
+        buf_planes->plane_info.mp[0].len = (uint32_t)(stride * scanline);
+        buf_planes->plane_info.mp[0].offset_x = 0;
+        buf_planes->plane_info.mp[0].offset_y = 0;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = dim->width;
+        buf_planes->plane_info.mp[0].height = dim->height;
+
+        stride = PAD_TO_SIZE(stride / 2, CAM_PAD_TO_16);
+        scanline = scanline / 2;
+        buf_planes->plane_info.mp[1].offset = 0;
+        buf_planes->plane_info.mp[1].len =
+            (uint32_t)(stride * scanline);
+        buf_planes->plane_info.mp[1].offset_x = 0;
+        buf_planes->plane_info.mp[1].offset_y = 0;
+        buf_planes->plane_info.mp[1].stride = stride;
+        buf_planes->plane_info.mp[1].scanline = scanline;
+        buf_planes->plane_info.mp[1].width = dim->width / 2;
+        buf_planes->plane_info.mp[1].height = dim->height / 2;
+
+        buf_planes->plane_info.mp[2].offset = 0;
+        buf_planes->plane_info.mp[2].len =
+            (uint32_t)(stride * scanline);
+        buf_planes->plane_info.mp[2].offset_x = 0;
+        buf_planes->plane_info.mp[2].offset_y = 0;
+        buf_planes->plane_info.mp[2].stride = stride;
+        buf_planes->plane_info.mp[2].scanline = scanline;
+        buf_planes->plane_info.mp[2].width = dim->width / 2;
+        buf_planes->plane_info.mp[2].height = dim->height / 2;
+
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len +
+                        buf_planes->plane_info.mp[1].len +
+                        buf_planes->plane_info.mp[2].len,
+                        CAM_PAD_TO_4K);
+        break;
+    case CAM_FORMAT_YUV_422_NV16:
+    case CAM_FORMAT_YUV_422_NV61:
+        /* 2 planes: Y + CbCr */
+        buf_planes->plane_info.num_planes = 2;
+
+        stride = PAD_TO_SIZE(dim->width, CAM_PAD_TO_16);
+        scanline = dim->height;
+        buf_planes->plane_info.mp[0].offset = 0;
+        buf_planes->plane_info.mp[0].len = (uint32_t)(stride * scanline);
+        buf_planes->plane_info.mp[0].offset_x = 0;
+        buf_planes->plane_info.mp[0].offset_y = 0;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = dim->width;
+        buf_planes->plane_info.mp[0].height = dim->height;
+
+        buf_planes->plane_info.mp[1].offset = 0;
+        buf_planes->plane_info.mp[1].len = (uint32_t)(stride * scanline);
+        buf_planes->plane_info.mp[1].offset_x = 0;
+        buf_planes->plane_info.mp[1].offset_y = 0;
+        buf_planes->plane_info.mp[1].stride = stride;
+        buf_planes->plane_info.mp[1].scanline = scanline;
+        buf_planes->plane_info.mp[1].width = dim->width;
+        buf_planes->plane_info.mp[1].height = dim->height;
+
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len +
+                        buf_planes->plane_info.mp[1].len,
+                        CAM_PAD_TO_4K);
+        break;
+    case CAM_FORMAT_YUV_420_NV12_VENUS:
+#ifdef VENUS_PRESENT
+        // using Venus
+        stride = VENUS_Y_STRIDE(COLOR_FMT_NV12, dim->width);
+        scanline = VENUS_Y_SCANLINES(COLOR_FMT_NV12, dim->height);
+
+        buf_planes->plane_info.frame_len =
+            VENUS_BUFFER_SIZE(COLOR_FMT_NV12, dim->width, dim->height);
+        buf_planes->plane_info.num_planes = 2;
+        buf_planes->plane_info.mp[0].len = (uint32_t)(stride * scanline);
+        buf_planes->plane_info.mp[0].offset = 0;
+        buf_planes->plane_info.mp[0].offset_x =0;
+        buf_planes->plane_info.mp[0].offset_y = 0;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = dim->width;
+        buf_planes->plane_info.mp[0].height = dim->height;
+        stride = VENUS_UV_STRIDE(COLOR_FMT_NV12, dim->width);
+        scanline = VENUS_UV_SCANLINES(COLOR_FMT_NV12, dim->height);
+        buf_planes->plane_info.mp[1].len =
+            buf_planes->plane_info.frame_len - buf_planes->plane_info.mp[0].len;
+        buf_planes->plane_info.mp[1].offset = 0;
+        buf_planes->plane_info.mp[1].offset_x =0;
+        buf_planes->plane_info.mp[1].offset_y = 0;
+        buf_planes->plane_info.mp[1].stride = stride;
+        buf_planes->plane_info.mp[1].scanline = scanline;
+        buf_planes->plane_info.mp[1].width = dim->width;
+        buf_planes->plane_info.mp[1].height = dim->height / 2;
+#else
+        CDBG_ERROR("%s: Venus hardware not avail, cannot use this format", __func__);
+        rc = -1;
+#endif
+        break;
+    default:
+        CDBG_ERROR("%s: Invalid cam_format for preview %d",
+                   __func__, fmt);
+        rc = -1;
+        break;
+    }
+
+    return rc;
+}
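+
+/*
+ * Worked example for CAM_FORMAT_YUV_420_NV21 at 1920x1080, assuming
+ * PAD_TO_SIZE() rounds its first argument up to a multiple of the second:
+ *   Y plane:    stride = PAD_TO_SIZE(1920, 32) = 1920,
+ *               scanline = PAD_TO_SIZE(1080, 2) = 1080, len = 2073600
+ *   CbCr plane: scanline = PAD_TO_SIZE(540, 2) = 540,  len = 1036800
+ *   frame_len   = PAD_TO_SIZE(2073600 + 1036800, 4096) = 3112960
+ */
+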
+/*===========================================================================
+ * FUNCTION   : mm_stream_calc_offset_post_view
+ *
+ * DESCRIPTION: calculate postview frame offset based on format and
+ *              padding information
+ *
+ * PARAMETERS :
+ *   @fmt     : image format
+ *   @dim     : image dimension
+ *   @buf_planes : [out] buffer plane information
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_calc_offset_post_view(cam_format_t fmt,
+                                      cam_dimension_t *dim,
+                                      cam_stream_buf_plane_info_t *buf_planes)
+{
+    int32_t rc = 0;
+    int stride = 0, scanline = 0;
+
+    switch (fmt) {
+    case CAM_FORMAT_YUV_420_NV12:
+    case CAM_FORMAT_YUV_420_NV21:
+        /* 2 planes: Y + CbCr */
+        buf_planes->plane_info.num_planes = 2;
+
+        stride = PAD_TO_SIZE(dim->width, CAM_PAD_TO_64);
+        scanline = PAD_TO_SIZE(dim->height, CAM_PAD_TO_64);
+        buf_planes->plane_info.mp[0].offset = 0;
+        buf_planes->plane_info.mp[0].len = (uint32_t)(stride * scanline);
+        buf_planes->plane_info.mp[0].offset_x = 0;
+        buf_planes->plane_info.mp[0].offset_y = 0;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = dim->width;
+        buf_planes->plane_info.mp[0].height = dim->height;
+
+        stride = PAD_TO_SIZE(dim->width, CAM_PAD_TO_64);
+        scanline = PAD_TO_SIZE(dim->height / 2, CAM_PAD_TO_64);
+        buf_planes->plane_info.mp[1].offset = 0;
+        buf_planes->plane_info.mp[1].len =
+            (uint32_t)(stride * scanline);
+        buf_planes->plane_info.mp[1].offset_x = 0;
+        buf_planes->plane_info.mp[1].offset_y = 0;
+        buf_planes->plane_info.mp[1].stride = stride;
+        buf_planes->plane_info.mp[1].scanline = scanline;
+        buf_planes->plane_info.mp[1].width = dim->width;
+        buf_planes->plane_info.mp[1].height = dim->height / 2;
+
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len +
+                        buf_planes->plane_info.mp[1].len,
+                        CAM_PAD_TO_4K);
+        break;
+    case CAM_FORMAT_YUV_420_NV21_ADRENO:
+        /* 2 planes: Y + CbCr */
+        buf_planes->plane_info.num_planes = 2;
+
+        stride = PAD_TO_SIZE(dim->width, CAM_PAD_TO_32);
+        scanline = PAD_TO_SIZE(dim->height, CAM_PAD_TO_32);
+        buf_planes->plane_info.mp[0].offset = 0;
+        buf_planes->plane_info.mp[0].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline), CAM_PAD_TO_4K);
+        buf_planes->plane_info.mp[0].offset_x = 0;
+        buf_planes->plane_info.mp[0].offset_y = 0;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = dim->width;
+        buf_planes->plane_info.mp[0].height = dim->height;
+
+        stride = PAD_TO_SIZE(dim->width / 2, CAM_PAD_TO_32) * 2;
+        scanline = PAD_TO_SIZE(dim->height / 2, CAM_PAD_TO_32);
+        buf_planes->plane_info.mp[1].offset = 0;
+        buf_planes->plane_info.mp[1].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline), CAM_PAD_TO_4K);
+        buf_planes->plane_info.mp[1].offset_x = 0;
+        buf_planes->plane_info.mp[1].offset_y = 0;
+        buf_planes->plane_info.mp[1].stride = stride;
+        buf_planes->plane_info.mp[1].scanline = scanline;
+        buf_planes->plane_info.mp[1].width = dim->width;
+        buf_planes->plane_info.mp[1].height = dim->height / 2;
+
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len +
+                        buf_planes->plane_info.mp[1].len,
+                        CAM_PAD_TO_4K);
+        break;
+    case CAM_FORMAT_YUV_420_YV12:
+        /* 3 planes: Y + Cr + Cb */
+        buf_planes->plane_info.num_planes = 3;
+
+        stride = PAD_TO_SIZE(dim->width, CAM_PAD_TO_16);
+        scanline = PAD_TO_SIZE(dim->height, CAM_PAD_TO_2);
+        buf_planes->plane_info.mp[0].offset = 0;
+        buf_planes->plane_info.mp[0].len = (uint32_t)(stride * scanline);
+        buf_planes->plane_info.mp[0].offset_x = 0;
+        buf_planes->plane_info.mp[0].offset_y = 0;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = dim->width;
+        buf_planes->plane_info.mp[0].height = dim->height;
+
+        stride = PAD_TO_SIZE(stride / 2, CAM_PAD_TO_16);
+        scanline = scanline / 2;
+        buf_planes->plane_info.mp[1].offset = 0;
+        buf_planes->plane_info.mp[1].len =
+            (uint32_t)(stride * scanline);
+        buf_planes->plane_info.mp[1].offset_x = 0;
+        buf_planes->plane_info.mp[1].offset_y = 0;
+        buf_planes->plane_info.mp[1].stride = stride;
+        buf_planes->plane_info.mp[1].scanline = scanline;
+        buf_planes->plane_info.mp[1].width = dim->width / 2;
+        buf_planes->plane_info.mp[1].height = dim->height / 2;
+
+        buf_planes->plane_info.mp[2].offset = 0;
+        buf_planes->plane_info.mp[2].len =
+            (uint32_t)(stride * scanline);
+        buf_planes->plane_info.mp[2].offset_x = 0;
+        buf_planes->plane_info.mp[2].offset_y = 0;
+        buf_planes->plane_info.mp[2].stride = stride;
+        buf_planes->plane_info.mp[2].scanline = scanline;
+        buf_planes->plane_info.mp[2].width = dim->width / 2;
+        buf_planes->plane_info.mp[2].height = dim->height / 2;
+
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len +
+                        buf_planes->plane_info.mp[1].len +
+                        buf_planes->plane_info.mp[2].len,
+                        CAM_PAD_TO_4K);
+        break;
+    case CAM_FORMAT_YUV_422_NV16:
+    case CAM_FORMAT_YUV_422_NV61:
+        /* 2 planes: Y + CbCr */
+        buf_planes->plane_info.num_planes = 2;
+
+        stride = PAD_TO_SIZE(dim->width, CAM_PAD_TO_16);
+        scanline = dim->height;
+        buf_planes->plane_info.mp[0].offset = 0;
+        buf_planes->plane_info.mp[0].len = (uint32_t)(stride * scanline);
+        buf_planes->plane_info.mp[0].offset_x = 0;
+        buf_planes->plane_info.mp[0].offset_y = 0;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = dim->width;
+        buf_planes->plane_info.mp[0].height = dim->height;
+
+        buf_planes->plane_info.mp[1].offset = 0;
+        buf_planes->plane_info.mp[1].len = (uint32_t)(stride * scanline);
+        buf_planes->plane_info.mp[1].offset_x = 0;
+        buf_planes->plane_info.mp[1].offset_y = 0;
+        buf_planes->plane_info.mp[1].stride = stride;
+        buf_planes->plane_info.mp[1].scanline = scanline;
+        buf_planes->plane_info.mp[1].width = dim->width;
+        buf_planes->plane_info.mp[1].height = dim->height;
+
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len +
+                        buf_planes->plane_info.mp[1].len,
+                        CAM_PAD_TO_4K);
+        break;
+    case CAM_FORMAT_YUV_420_NV12_VENUS:
+#ifdef VENUS_PRESENT
+        // using Venus
+        stride = VENUS_Y_STRIDE(COLOR_FMT_NV12, dim->width);
+        scanline = VENUS_Y_SCANLINES(COLOR_FMT_NV12, dim->height);
+
+        buf_planes->plane_info.frame_len =
+            VENUS_BUFFER_SIZE(COLOR_FMT_NV12, dim->width, dim->height);
+        buf_planes->plane_info.num_planes = 2;
+        buf_planes->plane_info.mp[0].len = (uint32_t)(stride * scanline);
+        buf_planes->plane_info.mp[0].offset = 0;
+        buf_planes->plane_info.mp[0].offset_x =0;
+        buf_planes->plane_info.mp[0].offset_y = 0;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = dim->width;
+        buf_planes->plane_info.mp[0].height = dim->height;
+        stride = VENUS_UV_STRIDE(COLOR_FMT_NV12, dim->width);
+        scanline = VENUS_UV_SCANLINES(COLOR_FMT_NV12, dim->height);
+        buf_planes->plane_info.mp[1].len =
+            buf_planes->plane_info.frame_len - buf_planes->plane_info.mp[0].len;
+        buf_planes->plane_info.mp[1].offset = 0;
+        buf_planes->plane_info.mp[1].offset_x =0;
+        buf_planes->plane_info.mp[1].offset_y = 0;
+        buf_planes->plane_info.mp[1].stride = stride;
+        buf_planes->plane_info.mp[1].scanline = scanline;
+        buf_planes->plane_info.mp[1].width = dim->width;
+        buf_planes->plane_info.mp[1].height = dim->height / 2;
+#else
+        CDBG_ERROR("%s: Venus hardware not avail, cannot use this format", __func__);
+        rc = -1;
+#endif
+        break;
+    default:
+        CDBG_ERROR("%s: Invalid cam_format for postview %d",
+                   __func__, fmt);
+        rc = -1;
+        break;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_calc_offset_snapshot
+ *
+ * DESCRIPTION: calculate snapshot/postproc frame offset based on format and
+ *              padding information
+ *
+ * PARAMETERS :
+ *   @fmt     : image format
+ *   @dim     : image dimension
+ *   @padding : padding information
+ *   @buf_planes : [out] buffer plane information
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_calc_offset_snapshot(cam_format_t fmt,
+                                       cam_dimension_t *dim,
+                                       cam_padding_info_t *padding,
+                                       cam_stream_buf_plane_info_t *buf_planes)
+{
+    int32_t rc = 0;
+    uint8_t isAFamily = mm_camera_util_chip_is_a_family();
+    int offset_x = 0, offset_y = 0;
+    int stride = 0, scanline = 0;
+
+    if (isAFamily) {
+        stride = dim->width;
+        scanline = PAD_TO_SIZE(dim->height, CAM_PAD_TO_16);
+        offset_x = 0;
+        offset_y = scanline - dim->height;
+        scanline += offset_y; /* double padding */
+    } else {
+        stride = PAD_TO_SIZE(dim->width,
+                             padding->width_padding);
+        scanline = PAD_TO_SIZE(dim->height,
+                               padding->height_padding);
+        offset_x = 0;
+        offset_y = 0;
+    }
+
+    switch (fmt) {
+    case CAM_FORMAT_YUV_420_NV12:
+    case CAM_FORMAT_YUV_420_NV21:
+        /* 2 planes: Y + CbCr */
+        buf_planes->plane_info.num_planes = 2;
+
+        buf_planes->plane_info.mp[0].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[0].offset =
+                PAD_TO_SIZE((uint32_t)(offset_x + stride * offset_y),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[0].offset_x = offset_x;
+        buf_planes->plane_info.mp[0].offset_y = offset_y;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = dim->width;
+        buf_planes->plane_info.mp[0].height = dim->height;
+
+        scanline = scanline / 2;
+        buf_planes->plane_info.mp[1].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[1].offset =
+                PAD_TO_SIZE((uint32_t)(offset_x + stride * offset_y),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[1].offset_x = offset_x;
+        buf_planes->plane_info.mp[1].offset_y = offset_y;
+        buf_planes->plane_info.mp[1].stride = stride;
+        buf_planes->plane_info.mp[1].scanline = scanline;
+        buf_planes->plane_info.mp[1].width = dim->width;
+        buf_planes->plane_info.mp[1].height = dim->height / 2;
+
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len +
+                        buf_planes->plane_info.mp[1].len,
+                        CAM_PAD_TO_4K);
+        break;
+    case CAM_FORMAT_YUV_420_YV12:
+        /* 3 planes: Y + Cr + Cb */
+        buf_planes->plane_info.num_planes = 3;
+
+        buf_planes->plane_info.mp[0].offset =
+                PAD_TO_SIZE((uint32_t)(offset_x + stride * offset_y),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[0].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[0].offset_x = offset_x;
+        buf_planes->plane_info.mp[0].offset_y = offset_y;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = dim->width;
+        buf_planes->plane_info.mp[0].height = dim->height;
+
+        stride = PAD_TO_SIZE(stride / 2, CAM_PAD_TO_16);
+        scanline = scanline / 2;
+        buf_planes->plane_info.mp[1].offset =
+                PAD_TO_SIZE((uint32_t)(offset_x + stride * offset_y),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[1].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[1].offset_x = offset_x;
+        buf_planes->plane_info.mp[1].offset_y = offset_y;
+        buf_planes->plane_info.mp[1].stride = stride;
+        buf_planes->plane_info.mp[1].scanline = scanline;
+        buf_planes->plane_info.mp[1].width = dim->width / 2;
+        buf_planes->plane_info.mp[1].height = dim->height / 2;
+
+        buf_planes->plane_info.mp[2].offset =
+                PAD_TO_SIZE((uint32_t)(offset_x + stride * offset_y),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[2].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[2].offset_x = offset_x;
+        buf_planes->plane_info.mp[2].offset_y = offset_y;
+        buf_planes->plane_info.mp[2].stride = stride;
+        buf_planes->plane_info.mp[2].scanline = scanline;
+        buf_planes->plane_info.mp[2].width = dim->width / 2;
+        buf_planes->plane_info.mp[2].height = dim->height / 2;
+
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len +
+                        buf_planes->plane_info.mp[1].len +
+                        buf_planes->plane_info.mp[2].len,
+                        CAM_PAD_TO_4K);
+        break;
+    case CAM_FORMAT_YUV_422_NV16:
+    case CAM_FORMAT_YUV_422_NV61:
+        /* 2 planes: Y + CbCr */
+        buf_planes->plane_info.num_planes = 2;
+        buf_planes->plane_info.mp[0].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[0].offset =
+                PAD_TO_SIZE((uint32_t)(offset_x + stride * offset_y),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[0].offset_x = offset_x;
+        buf_planes->plane_info.mp[0].offset_y = offset_y;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = dim->width;
+        buf_planes->plane_info.mp[0].height = dim->height;
+
+        buf_planes->plane_info.mp[1].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[1].offset =
+                PAD_TO_SIZE((uint32_t)(offset_x + stride * offset_y),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[1].offset_x = offset_x;
+        buf_planes->plane_info.mp[1].offset_y = offset_y;
+        buf_planes->plane_info.mp[1].stride = stride;
+        buf_planes->plane_info.mp[1].scanline = scanline;
+        buf_planes->plane_info.mp[1].width = dim->width;
+        buf_planes->plane_info.mp[1].height = dim->height;
+
+        buf_planes->plane_info.frame_len = PAD_TO_SIZE(
+            buf_planes->plane_info.mp[0].len + buf_planes->plane_info.mp[1].len,
+            CAM_PAD_TO_4K);
+        break;
+    default:
+        CDBG_ERROR("%s: Invalid cam_format for snapshot %d",
+                   __func__, fmt);
+        rc = -1;
+        break;
+    }
+
+    return rc;
+}
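+
+/*
+ * Example of the A-family padding above for a 4000x3000 NV21 snapshot,
+ * assuming a plane_padding of 1 (no extra per-plane padding):
+ *   scanline = PAD_TO_SIZE(3000, 16) = 3008, offset_y = 8,
+ *   scanline after the double padding = 3016, stride = 4000,
+ *   mp[0].offset = 4000 * 8 = 32000, mp[0].len = 4000 * 3016 = 12064000,
+ *   mp[1].len = 4000 * 1508 = 6032000,
+ *   frame_len = PAD_TO_SIZE(12064000 + 6032000, 4096) = 18096128
+ */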
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_calc_offset_raw
+ *
+ * DESCRIPTION: calculate raw frame offset based on format and padding information
+ *
+ * PARAMETERS :
+ *   @fmt     : image format
+ *   @dim     : image dimension
+ *   @padding : padding information
+ *   @buf_planes : [out] buffer plane information
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_calc_offset_raw(cam_format_t fmt,
+                                  cam_dimension_t *dim,
+                                  cam_padding_info_t *padding,
+                                  cam_stream_buf_plane_info_t *buf_planes)
+{
+    int32_t rc = 0;
+
+    if ((NULL == dim) || (NULL == padding) || (NULL == buf_planes)) {
+        return -1;
+    }
+
+    int32_t stride = PAD_TO_SIZE(dim->width, (int32_t)padding->width_padding);
+    int32_t stride_in_bytes = stride;
+    int32_t scanline = PAD_TO_SIZE(dim->height, (int32_t)padding->height_padding);
+
+    switch (fmt) {
+    case CAM_FORMAT_YUV_420_NV21:
+        /* 2 planes: Y + CbCr */
+        buf_planes->plane_info.num_planes = 2;
+
+        buf_planes->plane_info.mp[0].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline),
+                padding->plane_padding);
+        buf_planes->plane_info.mp[0].offset = 0;
+        buf_planes->plane_info.mp[0].offset_x = 0;
+        buf_planes->plane_info.mp[0].offset_y = 0;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].stride_in_bytes = stride;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = dim->width;
+        buf_planes->plane_info.mp[0].height = dim->height;
+
+        scanline = scanline / 2;
+        buf_planes->plane_info.mp[1].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline),
+                padding->plane_padding);
+        buf_planes->plane_info.mp[1].offset = 0;
+        buf_planes->plane_info.mp[1].offset_x = 0;
+        buf_planes->plane_info.mp[1].offset_y = 0;
+        buf_planes->plane_info.mp[1].stride = stride;
+        buf_planes->plane_info.mp[1].stride_in_bytes = stride;
+        buf_planes->plane_info.mp[1].scanline = scanline;
+        buf_planes->plane_info.mp[1].width = dim->width;
+        buf_planes->plane_info.mp[1].height = dim->height / 2;
+
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len +
+                buf_planes->plane_info.mp[1].len,
+                CAM_PAD_TO_4K);
+        break;
+    case CAM_FORMAT_YUV_RAW_8BIT_YUYV:
+    case CAM_FORMAT_YUV_RAW_8BIT_YVYU:
+    case CAM_FORMAT_YUV_RAW_8BIT_UYVY:
+    case CAM_FORMAT_YUV_RAW_8BIT_VYUY:
+    case CAM_FORMAT_JPEG_RAW_8BIT:
+        /* 1 plane */
+        /* Every pixel occupies 2 bytes */
+        stride = PAD_TO_SIZE(dim->width, CAM_PAD_TO_16);
+        stride_in_bytes = stride * 2;
+        buf_planes->plane_info.num_planes = 1;
+        buf_planes->plane_info.mp[0].offset = 0;
+        buf_planes->plane_info.mp[0].len =
+                PAD_TO_SIZE((uint32_t)(stride_in_bytes * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len, CAM_PAD_TO_4K);
+        buf_planes->plane_info.mp[0].offset_x =0;
+        buf_planes->plane_info.mp[0].offset_y = 0;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].stride_in_bytes = stride_in_bytes;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width =
+                (int32_t)buf_planes->plane_info.mp[0].len;
+        buf_planes->plane_info.mp[0].height = 1;
+        break;
+    case CAM_FORMAT_META_RAW_8BIT:
+        // Every pixel occupies 2 bytes
+        stride = PAD_TO_SIZE(dim->width, CAM_PAD_TO_16);
+        stride_in_bytes = stride * 2;
+        buf_planes->plane_info.num_planes = 1;
+        buf_planes->plane_info.mp[0].offset = 0;
+        buf_planes->plane_info.mp[0].len =
+                PAD_TO_SIZE((uint32_t)(stride_in_bytes * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len, CAM_PAD_TO_4K);
+        buf_planes->plane_info.mp[0].offset_x =0;
+        buf_planes->plane_info.mp[0].offset_y = 0;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].stride_in_bytes = stride_in_bytes;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        break;
+
+    case CAM_FORMAT_BAYER_QCOM_RAW_8BPP_GBRG:
+    case CAM_FORMAT_BAYER_QCOM_RAW_8BPP_GRBG:
+    case CAM_FORMAT_BAYER_QCOM_RAW_8BPP_RGGB:
+    case CAM_FORMAT_BAYER_QCOM_RAW_8BPP_BGGR:
+    case CAM_FORMAT_BAYER_MIPI_RAW_8BPP_GBRG:
+    case CAM_FORMAT_BAYER_MIPI_RAW_8BPP_GRBG:
+    case CAM_FORMAT_BAYER_MIPI_RAW_8BPP_RGGB:
+    case CAM_FORMAT_BAYER_MIPI_RAW_8BPP_BGGR:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_8BPP_GBRG:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_8BPP_GRBG:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_8BPP_RGGB:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_8BPP_BGGR:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_8BPP_GBRG:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_8BPP_GRBG:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_8BPP_RGGB:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_8BPP_BGGR:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN8_8BPP_GBRG:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN8_8BPP_GRBG:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN8_8BPP_RGGB:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN8_8BPP_BGGR:
+        /* 1 plane */
+        /* Every 16 pixels occupy 16 bytes */
+        stride = PAD_TO_SIZE(dim->width, CAM_PAD_TO_16);
+        stride_in_bytes = stride;
+        buf_planes->plane_info.num_planes = 1;
+        buf_planes->plane_info.mp[0].offset = 0;
+        buf_planes->plane_info.mp[0].len =
+                PAD_TO_SIZE((uint32_t)(stride_in_bytes * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len, CAM_PAD_TO_4K);
+        buf_planes->plane_info.mp[0].offset_x =0;
+        buf_planes->plane_info.mp[0].offset_y = 0;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].stride_in_bytes = stride_in_bytes;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = (int32_t)buf_planes->plane_info.mp[0].len;
+        buf_planes->plane_info.mp[0].height = 1;
+        break;
+    case CAM_FORMAT_BAYER_QCOM_RAW_10BPP_GBRG:
+    case CAM_FORMAT_BAYER_QCOM_RAW_10BPP_GRBG:
+    case CAM_FORMAT_BAYER_QCOM_RAW_10BPP_RGGB:
+    case CAM_FORMAT_BAYER_QCOM_RAW_10BPP_BGGR:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_10BPP_GBRG:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_10BPP_GRBG:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_10BPP_RGGB:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_10BPP_BGGR:
+        /* Every 12 pixels occupy 16 bytes */
+        stride = (dim->width + 11)/12 * 12;
+        stride_in_bytes = stride * 8 / 6;
+        buf_planes->plane_info.num_planes = 1;
+        buf_planes->plane_info.mp[0].offset = 0;
+        buf_planes->plane_info.mp[0].len =
+                PAD_TO_SIZE((uint32_t)(stride_in_bytes * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len, CAM_PAD_TO_4K);
+        buf_planes->plane_info.mp[0].offset_x =0;
+        buf_planes->plane_info.mp[0].offset_y = 0;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].stride_in_bytes = stride_in_bytes;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = (int32_t)buf_planes->plane_info.mp[0].len;
+        buf_planes->plane_info.mp[0].height = 1;
+        break;
+    case CAM_FORMAT_BAYER_QCOM_RAW_12BPP_GBRG:
+    case CAM_FORMAT_BAYER_QCOM_RAW_12BPP_GRBG:
+    case CAM_FORMAT_BAYER_QCOM_RAW_12BPP_RGGB:
+    case CAM_FORMAT_BAYER_QCOM_RAW_12BPP_BGGR:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_12BPP_GBRG:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_12BPP_GRBG:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_12BPP_RGGB:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_QCOM_12BPP_BGGR:
+        /* Every 10 pixels occupy 16 bytes */
+        stride = (dim->width + 9)/10 * 10;
+        stride_in_bytes = stride * 8 / 5;
+        buf_planes->plane_info.num_planes = 1;
+        buf_planes->plane_info.mp[0].offset = 0;
+        buf_planes->plane_info.mp[0].len =
+                PAD_TO_SIZE((uint32_t)(stride_in_bytes * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len, CAM_PAD_TO_4K);
+        buf_planes->plane_info.mp[0].offset_x =0;
+        buf_planes->plane_info.mp[0].offset_y = 0;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].stride_in_bytes = stride_in_bytes;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = (int32_t)buf_planes->plane_info.mp[0].len;
+        buf_planes->plane_info.mp[0].height = 1;
+        break;
+    case CAM_FORMAT_BAYER_MIPI_RAW_10BPP_GBRG:
+    case CAM_FORMAT_BAYER_MIPI_RAW_10BPP_GRBG:
+    case CAM_FORMAT_BAYER_MIPI_RAW_10BPP_RGGB:
+    case CAM_FORMAT_BAYER_MIPI_RAW_10BPP_BGGR:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_10BPP_GBRG:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_10BPP_GRBG:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_10BPP_RGGB:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_10BPP_BGGR:
+        /* Every 64 pixels occupy 80 bytes */
+        stride = PAD_TO_SIZE(dim->width, CAM_PAD_TO_4);
+        stride_in_bytes = PAD_TO_SIZE(stride * 5 / 4, CAM_PAD_TO_8);
+        buf_planes->plane_info.num_planes = 1;
+        buf_planes->plane_info.mp[0].offset = 0;
+        buf_planes->plane_info.mp[0].len =
+                PAD_TO_SIZE((uint32_t)(stride_in_bytes * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len, CAM_PAD_TO_4K);
+        buf_planes->plane_info.mp[0].offset_x =0;
+        buf_planes->plane_info.mp[0].offset_y = 0;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].stride_in_bytes = stride_in_bytes;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = (int32_t)buf_planes->plane_info.mp[0].len;
+        buf_planes->plane_info.mp[0].height = 1;
+        break;
+    case CAM_FORMAT_BAYER_MIPI_RAW_12BPP_GBRG:
+    case CAM_FORMAT_BAYER_MIPI_RAW_12BPP_GRBG:
+    case CAM_FORMAT_BAYER_MIPI_RAW_12BPP_RGGB:
+    case CAM_FORMAT_BAYER_MIPI_RAW_12BPP_BGGR:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_12BPP_GBRG:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_12BPP_GRBG:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_12BPP_RGGB:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_MIPI_12BPP_BGGR:
+        /* Every 32 pixels occupy 48 bytes */
+        stride = PAD_TO_SIZE(dim->width, CAM_PAD_TO_32);
+        stride_in_bytes = stride * 3 / 2;
+        buf_planes->plane_info.num_planes = 1;
+        buf_planes->plane_info.mp[0].offset = 0;
+        buf_planes->plane_info.mp[0].len =
+                PAD_TO_SIZE((uint32_t)(stride_in_bytes * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len, CAM_PAD_TO_4K);
+        buf_planes->plane_info.mp[0].offset_x =0;
+        buf_planes->plane_info.mp[0].offset_y = 0;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].stride_in_bytes = stride_in_bytes;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = (int32_t)buf_planes->plane_info.mp[0].len;
+        buf_planes->plane_info.mp[0].height = 1;
+        break;
+    case CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_8BPP_GBRG:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_8BPP_GRBG:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_8BPP_RGGB:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_8BPP_BGGR:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_10BPP_GBRG:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_10BPP_GRBG:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_10BPP_RGGB:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_10BPP_BGGR:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_12BPP_GBRG:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_12BPP_GRBG:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_12BPP_RGGB:
+    case CAM_FORMAT_BAYER_IDEAL_RAW_PLAIN16_12BPP_BGGR:
+        /* Every 8 pixels occupy 16 bytes */
+        stride = PAD_TO_SIZE(dim->width, CAM_PAD_TO_8);
+        stride_in_bytes = stride * 2;
+        buf_planes->plane_info.num_planes = 1;
+        buf_planes->plane_info.mp[0].offset = 0;
+        buf_planes->plane_info.mp[0].len =
+                PAD_TO_SIZE((uint32_t)(stride_in_bytes * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len, CAM_PAD_TO_4K);
+        buf_planes->plane_info.mp[0].offset_x =0;
+        buf_planes->plane_info.mp[0].offset_y = 0;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].stride_in_bytes = stride_in_bytes;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = (int32_t)buf_planes->plane_info.mp[0].len;
+        buf_planes->plane_info.mp[0].height = 1;
+        break;
+    default:
+        CDBG_ERROR("%s: Invalid cam_format %d for raw stream",
+                   __func__, fmt);
+        rc = -1;
+        break;
+    }
+
+    return rc;
+}
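+
+/*
+ * Worked example for a 10-bit MIPI packed Bayer stream at 4000x3000,
+ * assuming height_padding and plane_padding of 1:
+ *   stride = PAD_TO_SIZE(4000, 4) = 4000
+ *   stride_in_bytes = PAD_TO_SIZE(4000 * 5 / 4, 8) = 5000
+ *   mp[0].len = 5000 * 3000 = 15000000
+ *   frame_len = PAD_TO_SIZE(15000000, 4096) = 15003648
+ */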
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_calc_offset_video
+ *
+ * DESCRIPTION: calculate video frame offset based on format and
+ *              padding information
+ *
+ * PARAMETERS :
+ *   @dim     : image dimension
+ *   @buf_planes : [out] buffer plane information
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+#ifdef VENUS_PRESENT
+int32_t mm_stream_calc_offset_video(cam_dimension_t *dim,
+                                    cam_stream_buf_plane_info_t *buf_planes)
+{
+    int stride = 0, scanline = 0;
+
+    // using Venus
+    stride = VENUS_Y_STRIDE(COLOR_FMT_NV12, dim->width);
+    scanline = VENUS_Y_SCANLINES(COLOR_FMT_NV12, dim->height);
+
+    buf_planes->plane_info.frame_len =
+        VENUS_BUFFER_SIZE(COLOR_FMT_NV12, dim->width, dim->height);
+    buf_planes->plane_info.num_planes = 2;
+    buf_planes->plane_info.mp[0].len = (uint32_t)(stride * scanline);
+    buf_planes->plane_info.mp[0].offset = 0;
+    buf_planes->plane_info.mp[0].offset_x =0;
+    buf_planes->plane_info.mp[0].offset_y = 0;
+    buf_planes->plane_info.mp[0].stride = stride;
+    buf_planes->plane_info.mp[0].scanline = scanline;
+    buf_planes->plane_info.mp[0].width = dim->width;
+    buf_planes->plane_info.mp[0].height = dim->height;
+    stride = VENUS_UV_STRIDE(COLOR_FMT_NV12, dim->width);
+    scanline = VENUS_UV_SCANLINES(COLOR_FMT_NV12, dim->height);
+    buf_planes->plane_info.mp[1].len =
+        buf_planes->plane_info.frame_len - buf_planes->plane_info.mp[0].len;
+    buf_planes->plane_info.mp[1].offset = 0;
+    buf_planes->plane_info.mp[1].offset_x =0;
+    buf_planes->plane_info.mp[1].offset_y = 0;
+    buf_planes->plane_info.mp[1].stride = stride;
+    buf_planes->plane_info.mp[1].scanline = scanline;
+    buf_planes->plane_info.mp[1].width = dim->width;
+    buf_planes->plane_info.mp[1].height = dim->height / 2;
+
+    return 0;
+}
+#else
+int32_t mm_stream_calc_offset_video(cam_dimension_t *dim,
+                                    cam_stream_buf_plane_info_t *buf_planes)
+{
+    int stride = 0, scanline = 0;
+
+    buf_planes->plane_info.num_planes = 2;
+
+    stride = dim->width;
+    scanline = dim->height;
+    buf_planes->plane_info.mp[0].len =
+            PAD_TO_SIZE((uint32_t)(stride * scanline), CAM_PAD_TO_2K);
+    buf_planes->plane_info.mp[0].offset = 0;
+    buf_planes->plane_info.mp[0].offset_x =0;
+    buf_planes->plane_info.mp[0].offset_y = 0;
+    buf_planes->plane_info.mp[0].stride = stride;
+    buf_planes->plane_info.mp[0].scanline = scanline;
+    buf_planes->plane_info.mp[0].width = dim->width;
+    buf_planes->plane_info.mp[0].height = dim->height;
+
+    stride = dim->width;
+    scanline = dim->height / 2;
+    buf_planes->plane_info.mp[1].len =
+            PAD_TO_SIZE((uint32_t)(stride * scanline), CAM_PAD_TO_2K);
+    buf_planes->plane_info.mp[1].offset = 0;
+    buf_planes->plane_info.mp[1].offset_x =0;
+    buf_planes->plane_info.mp[1].offset_y = 0;
+    buf_planes->plane_info.mp[1].stride = stride;
+    buf_planes->plane_info.mp[1].scanline = scanline;
+    buf_planes->plane_info.mp[1].width = dim->width;
+    buf_planes->plane_info.mp[1].height = dim->height / 2;
+
+    buf_planes->plane_info.frame_len =
+            PAD_TO_SIZE(buf_planes->plane_info.mp[0].len +
+                    buf_planes->plane_info.mp[1].len,
+                    CAM_PAD_TO_4K);
+
+    return 0;
+}
+#endif
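+
+/*
+ * Worked example for the non-Venus video path at 1920x1080:
+ *   mp[0].len = PAD_TO_SIZE(1920 * 1080, 2048) = 2074624
+ *   mp[1].len = PAD_TO_SIZE(1920 * 540, 2048)  = 1038336
+ *   frame_len = PAD_TO_SIZE(2074624 + 1038336, 4096) = 3112960
+ */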
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_calc_offset_metadata
+ *
+ * DESCRIPTION: calculate metadata frame offset based on format and
+ *              padding information
+ *
+ * PARAMETERS :
+ *   @dim     : image dimension
+ *   @padding : padding information
+ *   @buf_planes : [out] buffer plane information
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_calc_offset_metadata(cam_dimension_t *dim,
+                                       cam_padding_info_t *padding,
+                                       cam_stream_buf_plane_info_t *buf_planes)
+{
+    int32_t rc = 0;
+    buf_planes->plane_info.num_planes = 1;
+    buf_planes->plane_info.mp[0].offset = 0;
+    buf_planes->plane_info.mp[0].len =
+            PAD_TO_SIZE((uint32_t)(dim->width * dim->height),
+                    padding->plane_padding);
+    buf_planes->plane_info.frame_len =
+        buf_planes->plane_info.mp[0].len;
+
+    buf_planes->plane_info.mp[0].offset_x = 0;
+    buf_planes->plane_info.mp[0].offset_y = 0;
+    buf_planes->plane_info.mp[0].stride = dim->width;
+    buf_planes->plane_info.mp[0].scanline = dim->height;
+    buf_planes->plane_info.mp[0].width = dim->width;
+    buf_planes->plane_info.mp[0].height = dim->height;
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_calc_offset_analysis
+ *
+ * DESCRIPTION: calculate analysis frame offset based on format and
+ *              padding information
+ *
+ * PARAMETERS :
+ *   @fmt     : image format
+ *   @dim     : image dimension
+ *   @padding : padding information
+ *   @buf_planes : [out] buffer plane information
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_calc_offset_analysis(cam_format_t fmt,
+                                       cam_dimension_t *dim,
+                                       cam_padding_info_t *padding,
+                                       cam_stream_buf_plane_info_t *buf_planes)
+{
+    int32_t rc = 0;
+    int32_t offset_x = 0, offset_y = 0;
+    int32_t stride, scanline;
+
+    /* Clip to minimum supported bytes per line */
+    if ((uint32_t)dim->width < padding->min_stride) {
+        stride = (int32_t)padding->min_stride;
+    } else {
+        stride = dim->width;
+    }
+
+    if ((uint32_t)dim->height < padding->min_scanline) {
+      scanline = (int32_t)padding->min_scanline;
+    } else {
+      scanline = dim->height;
+    }
+
+    stride = PAD_TO_SIZE(stride, padding->width_padding);
+    scanline = PAD_TO_SIZE(scanline, padding->height_padding);
+
+    switch (fmt) {
+    case CAM_FORMAT_YUV_420_NV12:
+    case CAM_FORMAT_YUV_420_NV21:
+        /* 2 planes: Y + CbCr */
+        buf_planes->plane_info.num_planes = 2;
+
+        buf_planes->plane_info.mp[0].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[0].offset =
+                PAD_TO_SIZE((uint32_t)(offset_x + stride * offset_y),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[0].offset_x = offset_x;
+        buf_planes->plane_info.mp[0].offset_y = offset_y;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = dim->width;
+        buf_planes->plane_info.mp[0].height = dim->height;
+
+        scanline = scanline / 2;
+        buf_planes->plane_info.mp[1].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[1].offset =
+                PAD_TO_SIZE((uint32_t)(offset_x + stride * offset_y),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[1].offset_x = offset_x;
+        buf_planes->plane_info.mp[1].offset_y = offset_y;
+        buf_planes->plane_info.mp[1].stride = stride;
+        buf_planes->plane_info.mp[1].scanline = scanline;
+        buf_planes->plane_info.mp[1].width = dim->width;
+        buf_planes->plane_info.mp[1].height = dim->height / 2;
+
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len +
+                        buf_planes->plane_info.mp[1].len,
+                        CAM_PAD_TO_4K);
+        break;
+    case CAM_FORMAT_YUV_420_YV12:
+        /* 3 planes: Y + Cr + Cb */
+        buf_planes->plane_info.num_planes = 3;
+
+        buf_planes->plane_info.mp[0].offset =
+                PAD_TO_SIZE((uint32_t)(offset_x + stride * offset_y),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[0].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[0].offset_x = offset_x;
+        buf_planes->plane_info.mp[0].offset_y = offset_y;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = dim->width;
+        buf_planes->plane_info.mp[0].height = dim->height;
+
+        stride = PAD_TO_SIZE(stride / 2, CAM_PAD_TO_16);
+        scanline = scanline / 2;
+        buf_planes->plane_info.mp[1].offset =
+                PAD_TO_SIZE((uint32_t)(offset_x + stride * offset_y),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[1].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[1].offset_x = offset_x;
+        buf_planes->plane_info.mp[1].offset_y = offset_y;
+        buf_planes->plane_info.mp[1].stride = stride;
+        buf_planes->plane_info.mp[1].scanline = scanline;
+        buf_planes->plane_info.mp[1].width = dim->width / 2;
+        buf_planes->plane_info.mp[1].height = dim->height / 2;
+
+        buf_planes->plane_info.mp[2].offset =
+                PAD_TO_SIZE((uint32_t)(offset_x + stride * offset_y),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[2].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[2].offset_x = offset_x;
+        buf_planes->plane_info.mp[2].offset_y = offset_y;
+        buf_planes->plane_info.mp[2].stride = stride;
+        buf_planes->plane_info.mp[2].scanline = scanline;
+        buf_planes->plane_info.mp[2].width = dim->width / 2;
+        buf_planes->plane_info.mp[2].height = dim->height / 2;
+
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len +
+                        buf_planes->plane_info.mp[1].len +
+                        buf_planes->plane_info.mp[2].len,
+                        CAM_PAD_TO_4K);
+        break;
+    case CAM_FORMAT_YUV_422_NV16:
+    case CAM_FORMAT_YUV_422_NV61:
+        /* 2 planes: Y + CbCr */
+        buf_planes->plane_info.num_planes = 2;
+        buf_planes->plane_info.mp[0].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[0].offset =
+                PAD_TO_SIZE((uint32_t)(offset_x + stride * offset_y),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[0].offset_x = offset_x;
+        buf_planes->plane_info.mp[0].offset_y = offset_y;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = dim->width;
+        buf_planes->plane_info.mp[0].height = dim->height;
+
+        buf_planes->plane_info.mp[1].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[1].offset =
+                PAD_TO_SIZE((uint32_t)(offset_x + stride * offset_y),
+                        padding->plane_padding);
+        buf_planes->plane_info.mp[1].offset_x = offset_x;
+        buf_planes->plane_info.mp[1].offset_y = offset_y;
+        buf_planes->plane_info.mp[1].stride = stride;
+        buf_planes->plane_info.mp[1].scanline = scanline;
+        buf_planes->plane_info.mp[1].width = dim->width;
+        buf_planes->plane_info.mp[1].height = dim->height;
+
+        buf_planes->plane_info.frame_len = PAD_TO_SIZE(
+            buf_planes->plane_info.mp[0].len + buf_planes->plane_info.mp[1].len,
+            CAM_PAD_TO_4K);
+        break;
+    case CAM_FORMAT_Y_ONLY:
+        buf_planes->plane_info.num_planes = 1;
+
+        buf_planes->plane_info.mp[0].len =
+                PAD_TO_SIZE((uint32_t)(stride * scanline),
+                padding->plane_padding);
+        buf_planes->plane_info.mp[0].offset =
+                PAD_TO_SIZE((uint32_t)(offset_x + stride * offset_y),
+                padding->plane_padding);
+        buf_planes->plane_info.mp[0].offset_x = offset_x;
+        buf_planes->plane_info.mp[0].offset_y = offset_y;
+        buf_planes->plane_info.mp[0].stride = stride;
+        buf_planes->plane_info.mp[0].scanline = scanline;
+        buf_planes->plane_info.mp[0].width = dim->width;
+        buf_planes->plane_info.mp[0].height = dim->height;
+        buf_planes->plane_info.frame_len =
+                PAD_TO_SIZE(buf_planes->plane_info.mp[0].len, CAM_PAD_TO_4K);
+      break;
+    default:
+        CDBG_ERROR("%s: Invalid cam_format for analysis %d",
+                   __func__, fmt);
+        rc = -1;
+        break;
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_calc_offset_postproc
+ *
+ * DESCRIPTION: calculate postprocess frame offset
+ *
+ * PARAMETERS :
+ *   @stream_info: ptr to stream info
+ *   @padding : padding information
+ *   @plns : [out] buffer plane information
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_calc_offset_postproc(cam_stream_info_t *stream_info,
+                                       cam_padding_info_t *padding,
+                                       cam_stream_buf_plane_info_t *plns)
+{
+    int32_t rc = 0;
+    cam_stream_type_t type = CAM_STREAM_TYPE_DEFAULT;
+    if (stream_info->reprocess_config.pp_type == CAM_OFFLINE_REPROCESS_TYPE) {
+        type = stream_info->reprocess_config.offline.input_type;
+        if (CAM_STREAM_TYPE_DEFAULT == type) {
+            if (plns->plane_info.frame_len == 0) {
+                // take offset from input source
+                *plns = stream_info->reprocess_config.offline.input_buf_planes;
+                return rc;
+            }
+        } else {
+            type = stream_info->reprocess_config.offline.input_type;
+        }
+    } else {
+        type = stream_info->reprocess_config.online.input_stream_type;
+    }
+
+    switch (type) {
+    case CAM_STREAM_TYPE_PREVIEW:
+        rc = mm_stream_calc_offset_preview(stream_info->fmt,
+                                           &stream_info->dim,
+                                           plns);
+        break;
+    case CAM_STREAM_TYPE_POSTVIEW:
+        rc = mm_stream_calc_offset_post_view(stream_info->fmt,
+                                           &stream_info->dim,
+                                           plns);
+        break;
+    case CAM_STREAM_TYPE_SNAPSHOT:
+    case CAM_STREAM_TYPE_CALLBACK:
+        rc = mm_stream_calc_offset_snapshot(stream_info->fmt,
+                                            &stream_info->dim,
+                                            padding,
+                                            plns);
+        break;
+    case CAM_STREAM_TYPE_VIDEO:
+        rc = mm_stream_calc_offset_video(&stream_info->dim,
+                        plns);
+        break;
+    case CAM_STREAM_TYPE_RAW:
+        rc = mm_stream_calc_offset_raw(stream_info->fmt,
+                                       &stream_info->dim,
+                                       padding,
+                                       plns);
+        break;
+    case CAM_STREAM_TYPE_ANALYSIS:
+        rc = mm_stream_calc_offset_analysis(stream_info->fmt,
+                                            &stream_info->dim,
+                                            padding,
+                                            plns);
+        break;
+    case CAM_STREAM_TYPE_METADATA:
+        rc = mm_stream_calc_offset_metadata(&stream_info->dim,
+                                            padding,
+                                            plns);
+        break;
+    case CAM_STREAM_TYPE_OFFLINE_PROC:
+        rc = mm_stream_calc_offset_snapshot(stream_info->fmt,
+                &stream_info->dim, padding, plns);
+        break;
+    default:
+        CDBG_ERROR("%s: not supported for stream type %d",
+                   __func__, type);
+        rc = -1;
+        break;
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_calc_offset
+ *
+ * DESCRIPTION: calculate frame offset based on format and padding information
+ *
+ * PARAMETERS :
+ *   @my_obj  : stream object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_calc_offset(mm_stream_t *my_obj)
+{
+    int32_t rc = 0;
+
+    cam_dimension_t dim = my_obj->stream_info->dim;
+    if (my_obj->stream_info->pp_config.feature_mask & CAM_QCOM_FEATURE_ROTATION &&
+        my_obj->stream_info->stream_type != CAM_STREAM_TYPE_VIDEO) {
+        if (my_obj->stream_info->pp_config.rotation == ROTATE_90 ||
+            my_obj->stream_info->pp_config.rotation == ROTATE_270) {
+            // rotated by 90 or 270, need to switch width and height
+            dim.width = my_obj->stream_info->dim.height;
+            dim.height = my_obj->stream_info->dim.width;
+        }
+    }
+
+    switch (my_obj->stream_info->stream_type) {
+    case CAM_STREAM_TYPE_PREVIEW:
+        rc = mm_stream_calc_offset_preview(my_obj->stream_info->fmt,
+                                           &dim,
+                                           &my_obj->stream_info->buf_planes);
+        break;
+    case CAM_STREAM_TYPE_POSTVIEW:
+      rc = mm_stream_calc_offset_post_view(my_obj->stream_info->fmt,
+                                         &dim,
+                                         &my_obj->stream_info->buf_planes);
+      break;
+    case CAM_STREAM_TYPE_SNAPSHOT:
+    case CAM_STREAM_TYPE_CALLBACK:
+        rc = mm_stream_calc_offset_snapshot(my_obj->stream_info->fmt,
+                                            &dim,
+                                            &my_obj->padding_info,
+                                            &my_obj->stream_info->buf_planes);
+        break;
+    case CAM_STREAM_TYPE_OFFLINE_PROC:
+        rc = mm_stream_calc_offset_postproc(my_obj->stream_info,
+                                            &my_obj->padding_info,
+                                            &my_obj->stream_info->buf_planes);
+        break;
+    case CAM_STREAM_TYPE_VIDEO:
+        rc = mm_stream_calc_offset_video(&dim,
+                                         &my_obj->stream_info->buf_planes);
+        break;
+    case CAM_STREAM_TYPE_RAW:
+        rc = mm_stream_calc_offset_raw(my_obj->stream_info->fmt,
+                                       &dim,
+                                       &my_obj->padding_info,
+                                       &my_obj->stream_info->buf_planes);
+        break;
+    case CAM_STREAM_TYPE_ANALYSIS:
+        rc = mm_stream_calc_offset_analysis(my_obj->stream_info->fmt,
+                                            &dim,
+                                            &my_obj->padding_info,
+                                            &my_obj->stream_info->buf_planes);
+        break;
+    case CAM_STREAM_TYPE_METADATA:
+        rc = mm_stream_calc_offset_metadata(&dim,
+                                            &my_obj->padding_info,
+                                            &my_obj->stream_info->buf_planes);
+        break;
+    default:
+        CDBG_ERROR("%s: not supported for stream type %d",
+                   __func__, my_obj->stream_info->stream_type);
+        rc = -1;
+        break;
+    }
+
+    my_obj->frame_offset = my_obj->stream_info->buf_planes.plane_info;
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_sync_info
+ *
+ * DESCRIPTION: synchronize stream information with server
+ *
+ * PARAMETERS :
+ *   @my_obj  : stream object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ * NOTE       : assumes the stream info buffer is mapped to the server and
+ *              filled in with stream information by the upper layer. This call
+ *              lets the server synchronize the stream information with the HAL.
+ *              If the server finds any fields that need to change according to
+ *              the hardware configuration, it modifies those fields so that the
+ *              HAL knows about them.
+ *==========================================================================*/
+int32_t mm_stream_sync_info(mm_stream_t *my_obj)
+{
+    int32_t rc = 0;
+    int32_t value = 0;
+    my_obj->stream_info->stream_svr_id = my_obj->server_stream_id;
+    rc = mm_stream_calc_offset(my_obj);
+
+    if (rc == 0) {
+        rc = mm_camera_util_s_ctrl(my_obj->fd,
+                                   CAM_PRIV_STREAM_INFO_SYNC,
+                                   &value);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_set_fmt
+ *
+ * DESCRIPTION: set stream format to kernel via v4l2 ioctl
+ *
+ * PARAMETERS :
+ *   @my_obj  : stream object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_set_fmt(mm_stream_t *my_obj)
+{
+    int32_t rc = 0;
+    struct v4l2_format fmt;
+    struct msm_v4l2_format_data msm_fmt;
+    int i;
+
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+
+    if (my_obj->stream_info->dim.width == 0 ||
+        my_obj->stream_info->dim.height == 0) {
+        CDBG_ERROR("%s:invalid input[w=%d,h=%d,fmt=%d]\n",
+                   __func__,
+                   my_obj->stream_info->dim.width,
+                   my_obj->stream_info->dim.height,
+                   my_obj->stream_info->fmt);
+        return -1;
+    }
+
+    memset(&fmt, 0, sizeof(fmt));
+    memset(&msm_fmt, 0, sizeof(msm_fmt));
+    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+    msm_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+
+
+    msm_fmt.width = (unsigned int)my_obj->stream_info->dim.width;
+    msm_fmt.height = (unsigned int)my_obj->stream_info->dim.height;
+    msm_fmt.pixelformat = mm_stream_get_v4l2_fmt(my_obj->stream_info->fmt);
+
+    if (my_obj->stream_info->streaming_mode != CAM_STREAMING_MODE_BATCH) {
+        msm_fmt.num_planes = (unsigned char)my_obj->frame_offset.num_planes;
+        for (i = 0; i < msm_fmt.num_planes; i++) {
+            msm_fmt.plane_sizes[i] = my_obj->frame_offset.mp[i].len;
+        }
+    } else {
+        msm_fmt.num_planes = 1;
+        msm_fmt.plane_sizes[0] = my_obj->stream_info->user_buf_info.size;
+    }
+
+    memcpy(fmt.fmt.raw_data, &msm_fmt, sizeof(msm_fmt));
+    rc = ioctl(my_obj->fd, VIDIOC_S_FMT, &fmt);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_buf_done
+ *
+ * DESCRIPTION: enqueue buffer back to kernel
+ *
+ * PARAMETERS :
+ *   @my_obj       : stream object
+ *   @frame        : frame to be enqueued back to kernel
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_buf_done(mm_stream_t * my_obj,
+                           mm_camera_buf_def_t *frame)
+{
+    int32_t rc = 0;
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+
+    pthread_mutex_lock(&my_obj->buf_lock);
+    if (my_obj->stream_info->streaming_mode == CAM_STREAMING_MODE_BATCH) {
+        rc = mm_stream_write_user_buf(my_obj, frame);
+    } else if(my_obj->buf_status[frame->buf_idx].buf_refcnt == 0) {
+        CDBG("%s: Error Trying to free second time?(idx=%d) count=%d\n",
+                   __func__, frame->buf_idx,
+                   my_obj->buf_status[frame->buf_idx].buf_refcnt);
+        rc = -1;
+    } else {
+        my_obj->buf_status[frame->buf_idx].buf_refcnt--;
+        if (0 == my_obj->buf_status[frame->buf_idx].buf_refcnt) {
+            CDBG("<DEBUG> : Buf done for buffer:%d, stream:%d", frame->buf_idx, frame->stream_type);
+            rc = mm_stream_qbuf(my_obj, frame);
+            if(rc < 0) {
+                CDBG_ERROR("%s: mm_camera_stream_qbuf(idx=%d) err=%d\n",
+                           __func__, frame->buf_idx, rc);
+            } else {
+                my_obj->buf_status[frame->buf_idx].in_kernel = 1;
+            }
+        } else {
+            CDBG("<DEBUG> : Still ref count pending count :%d",
+                 my_obj->buf_status[frame->buf_idx].buf_refcnt);
+            CDBG("<DEBUG> : for buffer:%p:%d",
+                 my_obj, frame->buf_idx);
+        }
+    }
+    pthread_mutex_unlock(&my_obj->buf_lock);
+    return rc;
+}
+
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_get_queued_buf_count
+ *
+ * DESCRIPTION: return queued buffer count
+ *
+ * PARAMETERS :
+ *   @my_obj       : stream object
+ *
+ * RETURN     : queued buffer count
+ *==========================================================================*/
+int32_t mm_stream_get_queued_buf_count(mm_stream_t *my_obj)
+{
+    int32_t rc = 0;
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+            __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+    pthread_mutex_lock(&my_obj->buf_lock);
+    rc = my_obj->queued_buffer_count;
+    pthread_mutex_unlock(&my_obj->buf_lock);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_stream_reg_buf_cb
+ *
+ * DESCRIPTION: Allow another stream to register a data callback on this stream.
+ *
+ * PARAMETERS :
+ *   @my_obj       : stream object
+ *   @val          : ptr to info about the callback to be registered
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_stream_reg_buf_cb(mm_stream_t *my_obj,
+                             mm_stream_data_cb_t *val)
+{
+    int32_t rc = -1;
+    uint8_t i;
+    CDBG("%s: E, my_handle = 0x%x, fd = %d, state = %d",
+         __func__, my_obj->my_hdl, my_obj->fd, my_obj->state);
+
+    pthread_mutex_lock(&my_obj->cb_lock);
+    for (i = 0; i < MM_CAMERA_STREAM_BUF_CB_MAX; i++) {
+        if(NULL == my_obj->buf_cb[i].cb) {
+            my_obj->buf_cb[i] = *val;
+            rc = 0;
+            break;
+        }
+    }
+    pthread_mutex_unlock(&my_obj->cb_lock);
+
+    return rc;
+}
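
The helpers above only compute plane layout; allocation happens elsewhere in the
HAL. As a rough sketch of how a caller could size an analysis-stream buffer with
them (illustrative dimensions and padding only; the struct fields and padding
macros are the ones already used in this file, and <string.h> plus the cam_types.h
definitions are assumed to be in scope):

    /* Illustration only -- not part of the HAL sources. */
    static int illustrate_analysis_buffer_sizing(void)
    {
        cam_dimension_t dim;
        cam_padding_info_t padding;
        cam_stream_buf_plane_info_t planes;

        memset(&dim, 0, sizeof(dim));
        memset(&padding, 0, sizeof(padding));
        memset(&planes, 0, sizeof(planes));

        dim.width  = 640;                        /* example dimensions */
        dim.height = 480;
        padding.width_padding  = CAM_PAD_TO_16;  /* example padding choices */
        padding.height_padding = CAM_PAD_TO_16;
        padding.plane_padding  = CAM_PAD_TO_16;

        if (mm_stream_calc_offset_analysis(CAM_FORMAT_YUV_420_NV21,
                                           &dim, &padding, &planes) != 0) {
            return -1;
        }
        /* planes.plane_info.frame_len is the total size to allocate;
         * mp[0] and mp[1] describe the Y and CbCr planes respectively. */
        return 0;
    }

Inside the HAL this dispatch is done by mm_stream_calc_offset(), which picks the
helper matching stream_info->stream_type as shown above.
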
diff --git a/camera/QCamera2/stack/mm-camera-interface/src/mm_camera_thread.c b/camera/QCamera2/stack/mm-camera-interface/src/mm_camera_thread.c
new file mode 100644
index 0000000..0351dfd
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-interface/src/mm_camera_thread.c
@@ -0,0 +1,697 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <pthread.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/prctl.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <cam_semaphore.h>
+
+#include "mm_camera_dbg.h"
+#include "mm_camera_interface.h"
+#include "mm_camera.h"
+
+typedef enum {
+    /* poll entries updated */
+    MM_CAMERA_PIPE_CMD_POLL_ENTRIES_UPDATED,
+    /* poll entries updated asynchronous */
+    MM_CAMERA_PIPE_CMD_POLL_ENTRIES_UPDATED_ASYNC,
+    /* commit updates */
+    MM_CAMERA_PIPE_CMD_COMMIT,
+    /* exit */
+    MM_CAMERA_PIPE_CMD_EXIT,
+    /* max count */
+    MM_CAMERA_PIPE_CMD_MAX
+} mm_camera_pipe_cmd_type_t;
+
+typedef enum {
+    MM_CAMERA_POLL_TASK_STATE_STOPPED,
+    MM_CAMERA_POLL_TASK_STATE_POLL,     /* poll thread is actively polling. */
+    MM_CAMERA_POLL_TASK_STATE_MAX
+} mm_camera_poll_task_state_type_t;
+
+typedef struct {
+    uint32_t cmd;
+    mm_camera_event_t event;
+} mm_camera_sig_evt_t;
+
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_poll_sig_async
+ *
+ * DESCRIPTION: Asynchronous call to send a command through the pipe.
+ *
+ * PARAMETERS :
+ *   @poll_cb      : ptr to poll thread object
+ *   @cmd          : command to be sent
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_poll_sig_async(mm_camera_poll_thread_t *poll_cb,
+                                  uint32_t cmd)
+{
+    /* send through pipe */
+    /* get the mutex */
+    mm_camera_sig_evt_t cmd_evt;
+
+    CDBG("%s: E cmd = %d", __func__,cmd);
+    memset(&cmd_evt, 0, sizeof(cmd_evt));
+    cmd_evt.cmd = cmd;
+    pthread_mutex_lock(&poll_cb->mutex);
+    /* reset the status to FALSE */
+    poll_cb->status = FALSE;
+
+    /* send cmd to worker */
+    ssize_t len = write(poll_cb->pfds[1], &cmd_evt, sizeof(cmd_evt));
+    if (len < 1) {
+        CDBG_ERROR("%s: len = %lld, errno = %d", __func__,
+                (long long int)len, errno);
+        /* Avoid waiting for the signal */
+        pthread_mutex_unlock(&poll_cb->mutex);
+        return 0;
+    }
+    CDBG("%s: begin IN mutex write done, len = %lld", __func__,
+            (long long int)len);
+    pthread_mutex_unlock(&poll_cb->mutex);
+    CDBG("%s: X", __func__);
+    return 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_poll_sig
+ *
+ * DESCRIPTION: Synchronized call to send a command through the pipe.
+ *
+ * PARAMETERS :
+ *   @poll_cb      : ptr to poll thread object
+ *   @cmd          : command to be sent
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+static int32_t mm_camera_poll_sig(mm_camera_poll_thread_t *poll_cb,
+                                  uint32_t cmd)
+{
+    /* send through pipe */
+    /* get the mutex */
+    mm_camera_sig_evt_t cmd_evt;
+
+    CDBG("%s: E cmd = %d", __func__,cmd);
+    memset(&cmd_evt, 0, sizeof(cmd_evt));
+    cmd_evt.cmd = cmd;
+    pthread_mutex_lock(&poll_cb->mutex);
+    /* reset the status to FALSE */
+    poll_cb->status = FALSE;
+    /* send cmd to worker */
+
+    ssize_t len = write(poll_cb->pfds[1], &cmd_evt, sizeof(cmd_evt));
+    if(len < 1) {
+        CDBG_ERROR("%s: len = %lld, errno = %d", __func__,
+                (long long int)len, errno);
+        /* Avoid waiting for the signal */
+        pthread_mutex_unlock(&poll_cb->mutex);
+        return 0;
+    }
+    CDBG("%s: begin IN mutex write done, len = %lld", __func__,
+            (long long int)len);
+    /* wait till worker task gives positive signal */
+    if (FALSE == poll_cb->status) {
+        CDBG("%s: wait", __func__);
+        pthread_cond_wait(&poll_cb->cond_v, &poll_cb->mutex);
+    }
+    /* done */
+    pthread_mutex_unlock(&poll_cb->mutex);
+    CDBG("%s: X", __func__);
+    return 0;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_poll_sig_done
+ *
+ * DESCRIPTION: signal that the posted command has been processed
+ *
+ * PARAMETERS :
+ *   @poll_cb : ptr to poll thread object
+ *
+ * RETURN     : none
+ *==========================================================================*/
+static void mm_camera_poll_sig_done(mm_camera_poll_thread_t *poll_cb)
+{
+    pthread_mutex_lock(&poll_cb->mutex);
+    poll_cb->status = TRUE;
+    pthread_cond_signal(&poll_cb->cond_v);
+    CDBG("%s: done, in mutex", __func__);
+    pthread_mutex_unlock(&poll_cb->mutex);
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_poll_set_state
+ *
+ * DESCRIPTION: set a polling state
+ *
+ * PARAMETERS :
+ *   @poll_cb : ptr to poll thread object
+ *   @state   : polling state (stopped/polling)
+ *
+ * RETURN     : none
+ *==========================================================================*/
+static void mm_camera_poll_set_state(mm_camera_poll_thread_t *poll_cb,
+                                     mm_camera_poll_task_state_type_t state)
+{
+    poll_cb->state = state;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_poll_proc_pipe
+ *
+ * DESCRIPTION: polling thread routine to process pipe
+ *
+ * PARAMETERS :
+ *   @poll_cb : ptr to poll thread object
+ *
+ * RETURN     : none
+ *==========================================================================*/
+static void mm_camera_poll_proc_pipe(mm_camera_poll_thread_t *poll_cb)
+{
+    ssize_t read_len;
+    int i;
+    mm_camera_sig_evt_t cmd_evt;
+    read_len = read(poll_cb->pfds[0], &cmd_evt, sizeof(cmd_evt));
+    CDBG("%s: read_fd = %d, read_len = %d, expect_len = %d cmd = %d",
+         __func__, poll_cb->pfds[0], (int)read_len, (int)sizeof(cmd_evt), cmd_evt.cmd);
+    switch (cmd_evt.cmd) {
+    case MM_CAMERA_PIPE_CMD_POLL_ENTRIES_UPDATED:
+    case MM_CAMERA_PIPE_CMD_POLL_ENTRIES_UPDATED_ASYNC:
+        /* we always have index 0 for pipe read */
+        poll_cb->num_fds = 0;
+        poll_cb->poll_fds[poll_cb->num_fds].fd = poll_cb->pfds[0];
+        poll_cb->poll_fds[poll_cb->num_fds].events = POLLIN|POLLRDNORM|POLLPRI;
+        poll_cb->num_fds++;
+
+        if (MM_CAMERA_POLL_TYPE_EVT == poll_cb->poll_type &&
+                poll_cb->num_fds < MAX_STREAM_NUM_IN_BUNDLE) {
+            if (poll_cb->poll_entries[0].fd >= 0) {
+                /* fd is valid, we update poll_fds */
+                poll_cb->poll_fds[poll_cb->num_fds].fd = poll_cb->poll_entries[0].fd;
+                poll_cb->poll_fds[poll_cb->num_fds].events = POLLIN|POLLRDNORM|POLLPRI;
+                poll_cb->num_fds++;
+            }
+        } else if (MM_CAMERA_POLL_TYPE_DATA == poll_cb->poll_type &&
+                poll_cb->num_fds <= MAX_STREAM_NUM_IN_BUNDLE) {
+            for(i = 0; i < MAX_STREAM_NUM_IN_BUNDLE; i++) {
+                if(poll_cb->poll_entries[i].fd >= 0) {
+                    /* fd is valid, we update poll_fds to this fd */
+                    poll_cb->poll_fds[poll_cb->num_fds].fd = poll_cb->poll_entries[i].fd;
+                    poll_cb->poll_fds[poll_cb->num_fds].events = POLLIN|POLLRDNORM|POLLPRI;
+                    poll_cb->num_fds++;
+                } else {
+                    /* fd is invalid; set the entry to -1 to prevent polling.
+                     * Per the poll() spec, entries with fd = -1 are not polled.
+                     * (If that were not the case, these invalid fds would have
+                     * to be skipped when updating this array.)
+                     * Keeping the fd = -1 slot makes it easy to map incoming
+                     * data back to its callback: the callback lives at index-1,
+                     * since index 0 is reserved for the pipe read. */
+                    poll_cb->poll_fds[poll_cb->num_fds].fd = -1;
+                    poll_cb->poll_fds[poll_cb->num_fds].events = 0;
+                    poll_cb->num_fds++;
+                }
+            }
+        }
+        if (cmd_evt.cmd != MM_CAMERA_PIPE_CMD_POLL_ENTRIES_UPDATED_ASYNC)
+            mm_camera_poll_sig_done(poll_cb);
+        break;
+
+    case MM_CAMERA_PIPE_CMD_COMMIT:
+        mm_camera_poll_sig_done(poll_cb);
+        break;
+    case MM_CAMERA_PIPE_CMD_EXIT:
+    default:
+        mm_camera_poll_set_state(poll_cb, MM_CAMERA_POLL_TASK_STATE_STOPPED);
+        mm_camera_poll_sig_done(poll_cb);
+        break;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_poll_fn
+ *
+ * DESCRIPTION: polling thread routine
+ *
+ * PARAMETERS :
+ *   @poll_cb : ptr to poll thread object
+ *
+ * RETURN     : none
+ *==========================================================================*/
+static void *mm_camera_poll_fn(mm_camera_poll_thread_t *poll_cb)
+{
+    int rc = 0, i;
+
+    if (NULL == poll_cb) {
+        CDBG_ERROR("%s: poll_cb is NULL!\n", __func__);
+        return NULL;
+    }
+    CDBG("%s: poll type = %d, num_fd = %d poll_cb = %p\n",
+         __func__, poll_cb->poll_type, poll_cb->num_fds,poll_cb);
+    do {
+         for(i = 0; i < poll_cb->num_fds; i++) {
+            poll_cb->poll_fds[i].events = POLLIN|POLLRDNORM|POLLPRI;
+         }
+
+         rc = poll(poll_cb->poll_fds, poll_cb->num_fds, poll_cb->timeoutms);
+         if(rc > 0) {
+            if ((poll_cb->poll_fds[0].revents & POLLIN) &&
+                (poll_cb->poll_fds[0].revents & POLLRDNORM)) {
+                /* if we have data on pipe, we only process pipe in this iteration */
+                CDBG("%s: cmd received on pipe\n", __func__);
+                mm_camera_poll_proc_pipe(poll_cb);
+            } else {
+                for(i=1; i<poll_cb->num_fds; i++) {
+                    /* Checking for ctrl events */
+                    if ((poll_cb->poll_type == MM_CAMERA_POLL_TYPE_EVT) &&
+                        (poll_cb->poll_fds[i].revents & POLLPRI)) {
+                        CDBG("%s: mm_camera_evt_notify\n", __func__);
+                        if (NULL != poll_cb->poll_entries[i-1].notify_cb) {
+                            poll_cb->poll_entries[i-1].notify_cb(poll_cb->poll_entries[i-1].user_data);
+                        }
+                    }
+
+                    if ((MM_CAMERA_POLL_TYPE_DATA == poll_cb->poll_type) &&
+                        (poll_cb->poll_fds[i].revents & POLLIN) &&
+                        (poll_cb->poll_fds[i].revents & POLLRDNORM)) {
+                        CDBG("%s: mm_stream_data_notify\n", __func__);
+                        if (NULL != poll_cb->poll_entries[i-1].notify_cb) {
+                            poll_cb->poll_entries[i-1].notify_cb(poll_cb->poll_entries[i-1].user_data);
+                        }
+                    }
+                }
+            }
+        } else {
+            /* in error case sleep 10 us and then continue. hard coded here */
+            usleep(10);
+            continue;
+        }
+    } while ((poll_cb != NULL) && (poll_cb->state == MM_CAMERA_POLL_TASK_STATE_POLL));
+    return NULL;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_poll_thread
+ *
+ * DESCRIPTION: polling thread entry function
+ *
+ * PARAMETERS :
+ *   @data    : ptr to poll thread object
+ *
+ * RETURN     : none
+ *==========================================================================*/
+static void *mm_camera_poll_thread(void *data)
+{
+    prctl(PR_SET_NAME, (unsigned long)"mm_cam_poll_th", 0, 0, 0);
+    mm_camera_poll_thread_t *poll_cb = (mm_camera_poll_thread_t *)data;
+
+    /* add pipe read fd into poll first */
+    poll_cb->poll_fds[poll_cb->num_fds++].fd = poll_cb->pfds[0];
+
+    mm_camera_poll_sig_done(poll_cb);
+    mm_camera_poll_set_state(poll_cb, MM_CAMERA_POLL_TASK_STATE_POLL);
+    return mm_camera_poll_fn(poll_cb);
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_poll_thread_notify_entries_updated
+ *
+ * DESCRIPTION: notify the polling thread that the polling fd entries have
+ *              been updated
+ *
+ * PARAMETERS :
+ *   @poll_cb : ptr to poll thread object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_poll_thread_notify_entries_updated(mm_camera_poll_thread_t * poll_cb)
+{
+    /* send poll entries updated signal to poll thread */
+    return mm_camera_poll_sig(poll_cb, MM_CAMERA_PIPE_CMD_POLL_ENTRIES_UPDATED);
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_poll_thread_commit_updates
+ *
+ * DESCRIPTION: sync with all previously pending async updates
+ *
+ * PARAMETERS :
+ *   @poll_cb : ptr to poll thread object
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_poll_thread_commit_updates(mm_camera_poll_thread_t * poll_cb)
+{
+    return mm_camera_poll_sig(poll_cb, MM_CAMERA_PIPE_CMD_COMMIT);
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_poll_thread_add_poll_fd
+ *
+ * DESCRIPTION: add a new fd into polling thread
+ *
+ * PARAMETERS :
+ *   @poll_cb   : ptr to poll thread object
+ *   @handler   : stream handle if channel data polling thread,
+ *                0 if event polling thread
+ *   @fd        : file descriptor need to be added into polling thread
+ *   @notify_cb : callback function to handle if any notify from fd
+ *   @userdata  : user data ptr
+ *   @call_type : Whether its Synchronous or Asynchronous call
+ *
+ * RETURN     : none
+ *==========================================================================*/
+int32_t mm_camera_poll_thread_add_poll_fd(mm_camera_poll_thread_t * poll_cb,
+                                          uint32_t handler,
+                                          int32_t fd,
+                                          mm_camera_poll_notify_t notify_cb,
+                                          void* userdata,
+                                          mm_camera_call_type_t call_type)
+{
+    int32_t rc = -1;
+    uint8_t idx = 0;
+
+    if (MM_CAMERA_POLL_TYPE_DATA == poll_cb->poll_type) {
+        /* get stream idx from handler if CH type */
+        idx = mm_camera_util_get_index_by_handler(handler);
+    } else {
+        /* for EVT type, only idx=0 is valid */
+        idx = 0;
+    }
+
+    if (MAX_STREAM_NUM_IN_BUNDLE > idx) {
+        poll_cb->poll_entries[idx].fd = fd;
+        poll_cb->poll_entries[idx].handler = handler;
+        poll_cb->poll_entries[idx].notify_cb = notify_cb;
+        poll_cb->poll_entries[idx].user_data = userdata;
+        /* send poll entries updated signal to poll thread */
+        if (call_type == mm_camera_sync_call ) {
+            rc = mm_camera_poll_sig(poll_cb, MM_CAMERA_PIPE_CMD_POLL_ENTRIES_UPDATED);
+        } else {
+            rc = mm_camera_poll_sig_async(poll_cb, MM_CAMERA_PIPE_CMD_POLL_ENTRIES_UPDATED_ASYNC );
+        }
+    } else {
+        CDBG_ERROR("%s: invalid handler %d (%d)",
+                   __func__, handler, idx);
+    }
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : mm_camera_poll_thread_del_poll_fd
+ *
+ * DESCRIPTION: delete a fd from polling thread
+ *
+ * PARAMETERS :
+ *   @poll_cb   : ptr to poll thread object
+ *   @handler   : stream handle if channel data polling thread,
+ *                0 if event polling thread
+ *
+ * RETURN     : int32_t type of status
+ *              0  -- success
+ *              -1 -- failure
+ *==========================================================================*/
+int32_t mm_camera_poll_thread_del_poll_fd(mm_camera_poll_thread_t * poll_cb,
+                                          uint32_t handler,
+                                          mm_camera_call_type_t call_type)
+{
+    int32_t rc = -1;
+    uint8_t idx = 0;
+
+    if (MM_CAMERA_POLL_TYPE_DATA == poll_cb->poll_type) {
+        /* get stream idx from handler if CH type */
+        idx = mm_camera_util_get_index_by_handler(handler);
+    } else {
+        /* for EVT type, only idx=0 is valid */
+        idx = 0;
+    }
+
+    if ((MAX_STREAM_NUM_IN_BUNDLE > idx) &&
+        (handler == poll_cb->poll_entries[idx].handler)) {
+        /* reset poll entry */
+        poll_cb->poll_entries[idx].fd = -1; /* set fd to invalid */
+        poll_cb->poll_entries[idx].handler = 0;
+        poll_cb->poll_entries[idx].notify_cb = NULL;
+
+        /* send poll entries updated signal to poll thread */
+        if (call_type == mm_camera_sync_call ) {
+            rc = mm_camera_poll_sig(poll_cb, MM_CAMERA_PIPE_CMD_POLL_ENTRIES_UPDATED);
+        } else {
+            rc = mm_camera_poll_sig_async(poll_cb, MM_CAMERA_PIPE_CMD_POLL_ENTRIES_UPDATED_ASYNC );
+        }
+    } else {
+        CDBG_ERROR("%s: invalid handler %d (%d)",
+                   __func__, handler, idx);
+        return -1;
+    }
+
+    return rc;
+}
+
+int32_t mm_camera_poll_thread_launch(mm_camera_poll_thread_t * poll_cb,
+                                     mm_camera_poll_thread_type_t poll_type)
+{
+    int32_t rc = 0;
+    size_t i = 0, cnt = 0;
+    poll_cb->poll_type = poll_type;
+
+    //Initialize poll_fds
+    cnt = sizeof(poll_cb->poll_fds) / sizeof(poll_cb->poll_fds[0]);
+    for (i = 0; i < cnt; i++) {
+        poll_cb->poll_fds[i].fd = -1;
+    }
+    //Initialize poll_entries
+    cnt = sizeof(poll_cb->poll_entries) / sizeof(poll_cb->poll_entries[0]);
+    for (i = 0; i < cnt; i++) {
+        poll_cb->poll_entries[i].fd = -1;
+    }
+    //Initialize pipe fds
+    poll_cb->pfds[0] = -1;
+    poll_cb->pfds[1] = -1;
+    rc = pipe(poll_cb->pfds);
+    if(rc < 0) {
+        CDBG_ERROR("%s: pipe open rc=%d\n", __func__, rc);
+        return -1;
+    }
+
+    poll_cb->timeoutms = -1;  /* infinite timeout */
+
+    CDBG("%s: poll_type = %d, read fd = %d, write fd = %d timeout = %d",
+        __func__, poll_cb->poll_type,
+        poll_cb->pfds[0], poll_cb->pfds[1],poll_cb->timeoutms);
+
+    pthread_mutex_init(&poll_cb->mutex, NULL);
+    pthread_cond_init(&poll_cb->cond_v, NULL);
+
+    /* launch the thread */
+    pthread_mutex_lock(&poll_cb->mutex);
+    poll_cb->status = 0;
+    pthread_create(&poll_cb->pid, NULL, mm_camera_poll_thread, (void *)poll_cb);
+    if(!poll_cb->status) {
+        pthread_cond_wait(&poll_cb->cond_v, &poll_cb->mutex);
+    }
+    if (!poll_cb->threadName) {
+        pthread_setname_np(poll_cb->pid, "CAM_poll");
+    } else {
+        pthread_setname_np(poll_cb->pid, poll_cb->threadName);
+    }
+    pthread_mutex_unlock(&poll_cb->mutex);
+    CDBG("%s: End",__func__);
+    return rc;
+}
+
+int32_t mm_camera_poll_thread_release(mm_camera_poll_thread_t *poll_cb)
+{
+    int32_t rc = 0;
+    if(MM_CAMERA_POLL_TASK_STATE_STOPPED == poll_cb->state) {
+        CDBG_ERROR("%s: err, poll thread is not running.\n", __func__);
+        return rc;
+    }
+
+    /* send exit signal to poll thread */
+    mm_camera_poll_sig(poll_cb, MM_CAMERA_PIPE_CMD_EXIT);
+    /* wait until poll thread exits */
+    if (pthread_join(poll_cb->pid, NULL) != 0) {
+        CDBG_ERROR("%s: pthread dead already\n", __func__);
+    }
+
+    /* close pipe */
+    if(poll_cb->pfds[0] >= 0) {
+        close(poll_cb->pfds[0]);
+    }
+    if(poll_cb->pfds[1] >= 0) {
+        close(poll_cb->pfds[1]);
+    }
+
+    pthread_mutex_destroy(&poll_cb->mutex);
+    pthread_cond_destroy(&poll_cb->cond_v);
+    memset(poll_cb, 0, sizeof(mm_camera_poll_thread_t));
+    poll_cb->pfds[0] = -1;
+    poll_cb->pfds[1] = -1;
+    return rc;
+}
+
+static void *mm_camera_cmd_thread(void *data)
+{
+    int running = 1;
+    int ret;
+    mm_camera_cmd_thread_t *cmd_thread =
+                (mm_camera_cmd_thread_t *)data;
+    mm_camera_cmdcb_t* node = NULL;
+
+    do {
+        do {
+            ret = cam_sem_wait(&cmd_thread->cmd_sem);
+            if (ret != 0 && errno != EINVAL) {
+                CDBG_ERROR("%s: cam_sem_wait error (%s)",
+                           __func__, strerror(errno));
+                return NULL;
+            }
+        } while (ret != 0);
+
+        /* we got notified about new cmd avail in cmd queue */
+        node = (mm_camera_cmdcb_t*)cam_queue_deq(&cmd_thread->cmd_queue);
+        while (node != NULL) {
+            switch (node->cmd_type) {
+            case MM_CAMERA_CMD_TYPE_EVT_CB:
+            case MM_CAMERA_CMD_TYPE_DATA_CB:
+            case MM_CAMERA_CMD_TYPE_REQ_DATA_CB:
+            case MM_CAMERA_CMD_TYPE_SUPER_BUF_DATA_CB:
+            case MM_CAMERA_CMD_TYPE_CONFIG_NOTIFY:
+            case MM_CAMERA_CMD_TYPE_START_ZSL:
+            case MM_CAMERA_CMD_TYPE_STOP_ZSL:
+            case MM_CAMERA_CMD_TYPE_GENERAL:
+            case MM_CAMERA_CMD_TYPE_FLUSH_QUEUE:
+                if (NULL != cmd_thread->cb) {
+                    cmd_thread->cb(node, cmd_thread->user_data);
+                }
+                break;
+            case MM_CAMERA_CMD_TYPE_EXIT:
+            default:
+                running = 0;
+                break;
+            }
+            free(node);
+            node = (mm_camera_cmdcb_t*)cam_queue_deq(&cmd_thread->cmd_queue);
+        } /* (node != NULL) */
+    } while (running);
+    return NULL;
+}
+
+int32_t mm_camera_cmd_thread_launch(mm_camera_cmd_thread_t * cmd_thread,
+                                    mm_camera_cmd_cb_t cb,
+                                    void* user_data)
+{
+    int32_t rc = 0;
+
+    cam_sem_init(&cmd_thread->cmd_sem, 0);
+    cam_queue_init(&cmd_thread->cmd_queue);
+    cmd_thread->cb = cb;
+    cmd_thread->user_data = user_data;
+
+    /* launch the thread */
+    pthread_create(&cmd_thread->cmd_pid,
+                   NULL,
+                   mm_camera_cmd_thread,
+                   (void *)cmd_thread);
+
+    if (!cmd_thread->threadName) {
+        pthread_setname_np(cmd_thread->cmd_pid, "CAM_launch");
+    } else {
+        pthread_setname_np(cmd_thread->cmd_pid, cmd_thread->threadName);
+    }
+    return rc;
+}
+
+int32_t mm_camera_cmd_thread_name(const char* name)
+{
+    int32_t rc = 0;
+    /* name the thread */
+    prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0);
+    return rc;
+}
+
+
+int32_t mm_camera_cmd_thread_stop(mm_camera_cmd_thread_t * cmd_thread)
+{
+    int32_t rc = 0;
+    mm_camera_cmdcb_t* node = (mm_camera_cmdcb_t *)malloc(sizeof(mm_camera_cmdcb_t));
+    if (NULL == node) {
+        CDBG_ERROR("%s: No memory for mm_camera_cmdcb_t", __func__);
+        return -1;
+    }
+
+    memset(node, 0, sizeof(mm_camera_cmdcb_t));
+    node->cmd_type = MM_CAMERA_CMD_TYPE_EXIT;
+
+    cam_queue_enq(&cmd_thread->cmd_queue, node);
+    cam_sem_post(&cmd_thread->cmd_sem);
+
+    /* wait until cmd thread exits */
+    if (pthread_join(cmd_thread->cmd_pid, NULL) != 0) {
+        CDBG("%s: pthread dead already\n", __func__);
+    }
+    return rc;
+}
+
+int32_t mm_camera_cmd_thread_destroy(mm_camera_cmd_thread_t * cmd_thread)
+{
+    int32_t rc = 0;
+    cam_queue_deinit(&cmd_thread->cmd_queue);
+    cam_sem_destroy(&cmd_thread->cmd_sem);
+    memset(cmd_thread, 0, sizeof(mm_camera_cmd_thread_t));
+    return rc;
+}
+
+int32_t mm_camera_cmd_thread_release(mm_camera_cmd_thread_t * cmd_thread)
+{
+    int32_t rc = 0;
+    rc = mm_camera_cmd_thread_stop(cmd_thread);
+    if (0 == rc) {
+        rc = mm_camera_cmd_thread_destroy(cmd_thread);
+    }
+    return rc;
+}
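
The thread helpers above give each channel a pipe-driven poll loop: commands are
written to pfds[1], the worker wakes up in poll(), and the synchronous variants
wait on cond_v until the worker acknowledges. A minimal sketch of the intended
lifecycle, with placeholder handle/fd values and assuming mm_camera_poll_notify_t
is a void (*)(void *) callback, as the call sites in mm_camera_poll_fn() suggest:

    /* Illustration only -- not part of the HAL sources. */
    static void on_stream_data(void *user_data)
    {
        (void)user_data;        /* dequeue the frame for this stream here */
    }

    static void illustrate_poll_thread(uint32_t stream_hdl, int32_t stream_fd)
    {
        mm_camera_poll_thread_t poll_th;
        memset(&poll_th, 0, sizeof(poll_th));

        /* spawn the worker; it blocks in poll() on the internal pipe */
        mm_camera_poll_thread_launch(&poll_th, MM_CAMERA_POLL_TYPE_DATA);

        /* register a stream fd; the callback fires when the fd is readable */
        mm_camera_poll_thread_add_poll_fd(&poll_th, stream_hdl, stream_fd,
                                          on_stream_data, NULL,
                                          mm_camera_sync_call);

        /* ... streaming ... */

        mm_camera_poll_thread_del_poll_fd(&poll_th, stream_hdl,
                                          mm_camera_sync_call);
        mm_camera_poll_thread_release(&poll_th);
    }

The async add/del path skips the cond_v wait; mm_camera_poll_thread_commit_updates()
is what later forces a synchronization point for those pending updates.
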
diff --git a/camera/QCamera2/stack/mm-camera-test/Android.mk b/camera/QCamera2/stack/mm-camera-test/Android.mk
new file mode 100644
index 0000000..ddfc976
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-test/Android.mk
@@ -0,0 +1,224 @@
+OLD_LOCAL_PATH := $(LOCAL_PATH)
+LOCAL_PATH:=$(call my-dir)
+
+# Build command line test app: mm-qcamera-app
+include $(LOCAL_PATH)/../../../common.mk
+include $(CLEAR_VARS)
+
+# b/24171136 many files not compiling with clang/llvm yet
+LOCAL_CLANG := false
+
+LOCAL_CFLAGS:= \
+        -DAMSS_VERSION=$(AMSS_VERSION) \
+        $(mmcamera_debug_defines) \
+        $(mmcamera_debug_cflags) \
+        $(USE_SERVER_TREE)
+
+ifeq ($(strip $(TARGET_USES_ION)),true)
+LOCAL_CFLAGS += -DUSE_ION
+endif
+
+LOCAL_CFLAGS += -D_ANDROID_
+
+LOCAL_SRC_FILES:= \
+        src/mm_qcamera_main_menu.c \
+        src/mm_qcamera_app.c \
+        src/mm_qcamera_unit_test.c \
+        src/mm_qcamera_video.c \
+        src/mm_qcamera_preview.c \
+        src/mm_qcamera_snapshot.c \
+        src/mm_qcamera_rdi.c \
+        src/mm_qcamera_reprocess.c\
+        src/mm_qcamera_queue.c \
+        src/mm_qcamera_socket.c \
+        src/mm_qcamera_commands.c
+#        src/mm_qcamera_dual_test.c \
+
+LOCAL_C_INCLUDES:=$(LOCAL_PATH)/inc
+LOCAL_C_INCLUDES+= \
+        frameworks/native/include/media/openmax \
+        $(LOCAL_PATH)/../common \
+        $(LOCAL_PATH)/../../../mm-image-codec/qexif \
+        $(LOCAL_PATH)/../../../mm-image-codec/qomx_core
+
+LOCAL_C_INCLUDES+= $(kernel_includes)
+LOCAL_ADDITIONAL_DEPENDENCIES := $(common_deps)
+
+LOCAL_CFLAGS += -DCAMERA_ION_HEAP_ID=ION_IOMMU_HEAP_ID
+ifeq ($(TARGET_BOARD_PLATFORM),msm8974)
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_FALLBACK_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_ION_FALLBACK_HEAP_ID=ION_IOMMU_HEAP_ID
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_CACHING_ID=0
+        LOCAL_CFLAGS += -DNUM_RECORDING_BUFFERS=9
+else ifeq ($(filter $(TARGET_BOARD_PLATFORM), apq8084 msm8084),$(TARGET_BOARD_PLATFORM))
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_FALLBACK_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_ION_FALLBACK_HEAP_ID=ION_IOMMU_HEAP_ID
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_CACHING_ID=0
+        LOCAL_CFLAGS += -DNUM_RECORDING_BUFFERS=9
+else ifeq ($(TARGET_BOARD_PLATFORM),msm8994)
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_FALLBACK_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_ION_FALLBACK_HEAP_ID=ION_IOMMU_HEAP_ID
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_CACHING_ID=0
+        LOCAL_CFLAGS += -DNUM_RECORDING_BUFFERS=9
+else ifeq ($(TARGET_BOARD_PLATFORM),msm8916)
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_HEAP_ID=GRALLOC_USAGE_PRIVATE_MM_HEAP
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_FALLBACK_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_ION_FALLBACK_HEAP_ID=ION_IOMMU_HEAP_ID
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_CACHING_ID=0
+        LOCAL_CFLAGS += -DNUM_RECORDING_BUFFERS=9
+else ifeq ($(TARGET_BOARD_PLATFORM),msm8226)
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_HEAP_ID=GRALLOC_USAGE_PRIVATE_MM_HEAP
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_FALLBACK_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_ION_FALLBACK_HEAP_ID=ION_IOMMU_HEAP_ID
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_CACHING_ID=0
+        LOCAL_CFLAGS += -DNUM_RECORDING_BUFFERS=9
+else ifeq ($(TARGET_BOARD_PLATFORM),msm8610)
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_HEAP_ID=GRALLOC_USAGE_PRIVATE_MM_HEAP
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_FALLBACK_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_ION_FALLBACK_HEAP_ID=ION_IOMMU_HEAP_ID
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_CACHING_ID=0
+        LOCAL_CFLAGS += -DNUM_RECORDING_BUFFERS=9
+else ifeq ($(TARGET_BOARD_PLATFORM),msm8960)
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_FALLBACK_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_ION_FALLBACK_HEAP_ID=ION_IOMMU_HEAP_ID
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_CACHING_ID=0
+        LOCAL_CFLAGS += -DNUM_RECORDING_BUFFERS=5
+else ifneq (,$(filter msm8660,$(TARGET_BOARD_PLATFORM)))
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_HEAP_ID=GRALLOC_USAGE_PRIVATE_CAMERA_HEAP
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_FALLBACK_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP # Don't Care
+        LOCAL_CFLAGS += -DCAMERA_ION_FALLBACK_HEAP_ID=ION_IOMMU_HEAP_ID # EBI
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_CACHING_ID=0
+        LOCAL_CFLAGS += -DNUM_RECORDING_BUFFERS=5
+else
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_HEAP_ID=GRALLOC_USAGE_PRIVATE_CAMERA_HEAP
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_FALLBACK_HEAP_ID=GRALLOC_USAGE_PRIVATE_CAMERA_HEAP # Don't Care
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_CACHING_ID=GRALLOC_USAGE_PRIVATE_UNCACHED #uncached
+        LOCAL_CFLAGS += -DCAMERA_ION_FALLBACK_HEAP_ID=ION_CAMERA_HEAP_ID
+        LOCAL_CFLAGS += -DNUM_RECORDING_BUFFERS=5
+endif
+LOCAL_CFLAGS += -Wall -Wextra -Werror
+
+LOCAL_SHARED_LIBRARIES:= \
+         libcutils libdl libmmcamera_interface
+
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_32_BIT_ONLY := $(BOARD_QTI_CAMERA_32BIT_ONLY)
+
+LOCAL_MODULE:= mm-qcamera-app
+
+include $(BUILD_EXECUTABLE)
+
+# Build tuning library
+include $(CLEAR_VARS)
+
+# b/24171136 many files not compiling with clang/llvm yet
+LOCAL_CLANG := false
+
+LOCAL_CFLAGS:= \
+        -DAMSS_VERSION=$(AMSS_VERSION) \
+        $(mmcamera_debug_defines) \
+        $(mmcamera_debug_cflags) \
+        $(USE_SERVER_TREE)
+
+ifeq ($(strip $(TARGET_USES_ION)),true)
+LOCAL_CFLAGS += -DUSE_ION
+endif
+
+LOCAL_CFLAGS += -D_ANDROID_
+
+LOCAL_SRC_FILES:= \
+        src/mm_qcamera_main_menu.c \
+        src/mm_qcamera_app.c \
+        src/mm_qcamera_unit_test.c \
+        src/mm_qcamera_video.c \
+        src/mm_qcamera_preview.c \
+        src/mm_qcamera_snapshot.c \
+        src/mm_qcamera_rdi.c \
+        src/mm_qcamera_reprocess.c\
+        src/mm_qcamera_queue.c \
+        src/mm_qcamera_socket.c \
+        src/mm_qcamera_commands.c
+#        src/mm_qcamera_dual_test.c \
+
+LOCAL_C_INCLUDES:=$(LOCAL_PATH)/inc
+LOCAL_C_INCLUDES+= \
+        frameworks/native/include/media/openmax \
+        $(LOCAL_PATH)/../common \
+        $(LOCAL_PATH)/../../../mm-image-codec/qexif \
+        $(LOCAL_PATH)/../../../mm-image-codec/qomx_core
+
+LOCAL_C_INCLUDES+= $(kernel_includes)
+LOCAL_ADDITIONAL_DEPENDENCIES := $(common_deps)
+
+LOCAL_CFLAGS += -DCAMERA_ION_HEAP_ID=ION_IOMMU_HEAP_ID
+ifeq ($(TARGET_BOARD_PLATFORM),msm8974)
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_FALLBACK_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_ION_FALLBACK_HEAP_ID=ION_IOMMU_HEAP_ID
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_CACHING_ID=0
+        LOCAL_CFLAGS += -DNUM_RECORDING_BUFFERS=9
+else ifeq ($(filter $(TARGET_BOARD_PLATFORM), apq8084 msm8084),$(TARGET_BOARD_PLATFORM))
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_FALLBACK_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_ION_FALLBACK_HEAP_ID=ION_IOMMU_HEAP_ID
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_CACHING_ID=0
+        LOCAL_CFLAGS += -DNUM_RECORDING_BUFFERS=9
+else ifeq ($(TARGET_BOARD_PLATFORM),msm8994)
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_FALLBACK_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_ION_FALLBACK_HEAP_ID=ION_IOMMU_HEAP_ID
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_CACHING_ID=0
+        LOCAL_CFLAGS += -DNUM_RECORDING_BUFFERS=9
+else ifeq ($(TARGET_BOARD_PLATFORM),msm8916)
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_HEAP_ID=GRALLOC_USAGE_PRIVATE_MM_HEAP
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_FALLBACK_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_ION_FALLBACK_HEAP_ID=ION_IOMMU_HEAP_ID
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_CACHING_ID=0
+        LOCAL_CFLAGS += -DNUM_RECORDING_BUFFERS=9
+else ifeq ($(TARGET_BOARD_PLATFORM),msm8226)
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_HEAP_ID=GRALLOC_USAGE_PRIVATE_MM_HEAP
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_FALLBACK_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_ION_FALLBACK_HEAP_ID=ION_IOMMU_HEAP_ID
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_CACHING_ID=0
+        LOCAL_CFLAGS += -DNUM_RECORDING_BUFFERS=9
+else ifeq ($(TARGET_BOARD_PLATFORM),msm8610)
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_HEAP_ID=GRALLOC_USAGE_PRIVATE_MM_HEAP
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_FALLBACK_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_ION_FALLBACK_HEAP_ID=ION_IOMMU_HEAP_ID
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_CACHING_ID=0
+        LOCAL_CFLAGS += -DNUM_RECORDING_BUFFERS=9
+else ifeq ($(TARGET_BOARD_PLATFORM),msm8960)
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_FALLBACK_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP
+        LOCAL_CFLAGS += -DCAMERA_ION_FALLBACK_HEAP_ID=ION_IOMMU_HEAP_ID
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_CACHING_ID=0
+        LOCAL_CFLAGS += -DNUM_RECORDING_BUFFERS=5
+else ifneq (,$(filter msm8660,$(TARGET_BOARD_PLATFORM)))
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_HEAP_ID=GRALLOC_USAGE_PRIVATE_CAMERA_HEAP
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_FALLBACK_HEAP_ID=GRALLOC_USAGE_PRIVATE_IOMMU_HEAP # Don't Care
+        LOCAL_CFLAGS += -DCAMERA_ION_FALLBACK_HEAP_ID=ION_IOMMU_HEAP_ID # EBI
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_CACHING_ID=0
+        LOCAL_CFLAGS += -DNUM_RECORDING_BUFFERS=5
+else
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_HEAP_ID=GRALLOC_USAGE_PRIVATE_CAMERA_HEAP
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_FALLBACK_HEAP_ID=GRALLOC_USAGE_PRIVATE_CAMERA_HEAP # Don't Care
+        LOCAL_CFLAGS += -DCAMERA_GRALLOC_CACHING_ID=GRALLOC_USAGE_PRIVATE_UNCACHED #uncached
+        LOCAL_CFLAGS += -DCAMERA_ION_FALLBACK_HEAP_ID=ION_CAMERA_HEAP_ID
+        LOCAL_CFLAGS += -DNUM_RECORDING_BUFFERS=5
+endif
+LOCAL_CFLAGS += -Wall -Wextra -Werror
+
+LOCAL_SHARED_LIBRARIES:= \
+         libcutils libdl libmmcamera_interface
+
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_32_BIT_ONLY := $(BOARD_QTI_CAMERA_32BIT_ONLY)
+
+LOCAL_MODULE:= libmm-qcamera
+include $(BUILD_SHARED_LIBRARY)
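The per-platform blocks above only vary the heap-ID macros and the recording-buffer count that reach the test sources through LOCAL_CFLAGS. As a minimal sketch of how those -D values end up being consumed (the #ifndef fallbacks are illustrative placeholders, not real platform values; the mask pattern mirrors mm_app_alloc_bufs() later in this patch):

    /* Sketch: deriving an ION heap mask from the Android.mk -D macros. */
    #ifndef CAMERA_ION_HEAP_ID
    #define CAMERA_ION_HEAP_ID          0   /* placeholder only */
    #endif
    #ifndef CAMERA_ION_FALLBACK_HEAP_ID
    #define CAMERA_ION_FALLBACK_HEAP_ID 0   /* placeholder only */
    #endif

    static unsigned int heap_mask_for(int is_streambuf)
    {
        unsigned int ion_type = 0x1 << CAMERA_ION_FALLBACK_HEAP_ID;
        if (is_streambuf) {
            ion_type |= 0x1 << CAMERA_ION_HEAP_ID;
        }
        return ion_type;
    }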
diff --git a/camera/QCamera2/stack/mm-camera-test/inc/mm_qcamera_app.h b/camera/QCamera2/stack/mm-camera-test/inc/mm_qcamera_app.h
new file mode 100644
index 0000000..f1c688b
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-test/inc/mm_qcamera_app.h
@@ -0,0 +1,521 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MM_QCAMERA_APP_H__
+#define __MM_QCAMERA_APP_H__
+
+#include <pthread.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <linux/fb.h>
+#include <linux/msm_mdp.h>
+#include <semaphore.h>
+
+#include "mm_camera_interface.h"
+#include "mm_jpeg_interface.h"
+#include "mm_qcamera_socket.h"
+
+#define MM_QCAMERA_APP_INTERATION 1
+
+#define MM_APP_MAX_DUMP_FRAME_NUM 1000
+
+#define PREVIEW_BUF_NUM 7
+#define VIDEO_BUF_NUM 7
+#define ISP_PIX_BUF_NUM 9
+#define STATS_BUF_NUM 4
+#define RDI_BUF_NUM 8
+#define CAPTURE_BUF_NUM 5
+
+#define DEFAULT_PREVIEW_FORMAT    CAM_FORMAT_YUV_420_NV21
+#define DEFAULT_PREVIEW_WIDTH     640
+#define DEFAULT_PREVIEW_HEIGHT    480
+#define DEFAULT_PREVIEW_PADDING   CAM_PAD_TO_WORD
+#define DEFAULT_VIDEO_FORMAT      CAM_FORMAT_YUV_420_NV12
+#define DEFAULT_VIDEO_WIDTH       800
+#define DEFAULT_VIDEO_HEIGHT      480
+#define DEFAULT_VIDEO_PADDING     CAM_PAD_TO_2K
+#define DEFAULT_SNAPSHOT_FORMAT   CAM_FORMAT_YUV_420_NV21
+#define DEFAULT_RAW_FORMAT        CAM_FORMAT_BAYER_QCOM_RAW_10BPP_GBRG
+
+#define DEFAULT_SNAPSHOT_WIDTH    1024
+#define DEFAULT_SNAPSHOT_HEIGHT   768
+#define DEFAULT_SNAPSHOT_PADDING  CAM_PAD_TO_WORD
+
+#define DEFAULT_OV_FORMAT         MDP_Y_CRCB_H2V2
+#define DEFAULT_OV_FORMAT_BPP     3/2
+#define DEFAULT_CAMERA_FORMAT_BPP 3/2
+#define FB_PATH                   "/dev/graphics/fb0"
+#define BACKLIGHT_CONTROL         "/sys/class/leds/lcd-backlight/brightness"
+#define BACKLIGHT_LEVEL           "205"
+
+#define ENABLE_REPROCESSING       1
+
+#define INVALID_KEY_PRESS 0
+#define BASE_OFFSET  ('Z' - 'A' + 1)
+#define BASE_OFFSET_NUM  ('Z' - 'A' + 2)
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+typedef enum {
+    TUNE_CMD_INIT,
+    TUNE_CMD_GET_LIST,
+    TUNE_CMD_GET_PARAMS,
+    TUNE_CMD_SET_PARAMS,
+    TUNE_CMD_MISC,
+    TUNE_CMD_DEINIT,
+} mm_camera_tune_cmd_t;
+
+typedef enum {
+    TUNE_PREVCMD_INIT,
+    TUNE_PREVCMD_SETDIM,
+    TUNE_PREVCMD_GETINFO,
+    TUNE_PREVCMD_GETCHUNKSIZE,
+    TUNE_PREVCMD_GETFRAME,
+    TUNE_PREVCMD_UNSUPPORTED,
+    TUNE_PREVCMD_DEINIT,
+} mm_camera_tune_prevcmd_t;
+
+typedef void (*prev_callback) (mm_camera_buf_def_t *preview_frame);
+
+typedef struct {
+  char *send_buf;
+  uint32_t send_len;
+  void *next;
+} eztune_prevcmd_rsp;
+
+typedef struct {
+    int (*command_process) (void *recv, mm_camera_tune_cmd_t cmd,
+      void *param, char *send_buf, uint32_t send_len);
+    int (*prevcommand_process) (void *recv, mm_camera_tune_prevcmd_t cmd,
+      void *param, char **send_buf, uint32_t *send_len);
+    void (*prevframe_callback) (mm_camera_buf_def_t *preview_frame);
+} mm_camera_tune_func_t;
+
+typedef struct {
+    mm_camera_tune_func_t *func_tbl;
+    void *lib_handle;
+}mm_camera_tuning_lib_params_t;
+
+typedef enum {
+    MM_CAMERA_OK,
+    MM_CAMERA_E_GENERAL,
+    MM_CAMERA_E_NO_MEMORY,
+    MM_CAMERA_E_NOT_SUPPORTED,
+    MM_CAMERA_E_INVALID_INPUT,
+    MM_CAMERA_E_INVALID_OPERATION, /* 5 */
+    MM_CAMERA_E_ENCODE,
+    MM_CAMERA_E_BUFFER_REG,
+    MM_CAMERA_E_PMEM_ALLOC,
+    MM_CAMERA_E_CAPTURE_FAILED,
+    MM_CAMERA_E_CAPTURE_TIMEOUT, /* 10 */
+} mm_camera_status_type_t;
+
+typedef enum {
+    MM_CHANNEL_TYPE_ZSL,      /* preview, and snapshot main */
+    MM_CHANNEL_TYPE_CAPTURE,  /* snapshot main, and postview */
+    MM_CHANNEL_TYPE_PREVIEW,  /* preview only */
+    MM_CHANNEL_TYPE_SNAPSHOT, /* snapshot main only */
+    MM_CHANNEL_TYPE_VIDEO,    /* video only */
+    MM_CHANNEL_TYPE_RDI,      /* rdi only */
+    MM_CHANNEL_TYPE_REPROCESS,/* offline reprocess */
+    MM_CHANNEL_TYPE_MAX
+} mm_camera_channel_type_t;
+
+typedef struct {
+    int                     fd;
+    int                     main_ion_fd;
+    ion_user_handle_t       handle;
+    size_t                  size;
+    parm_buffer_t          *data;
+} mm_camera_app_meminfo_t;
+
+typedef struct {
+    mm_camera_buf_def_t buf;
+    mm_camera_app_meminfo_t mem_info;
+} mm_camera_app_buf_t;
+
+typedef struct {
+    uint32_t s_id;
+    mm_camera_stream_config_t s_config;
+    cam_frame_len_offset_t offset;
+    uint8_t num_of_bufs;
+    uint32_t multipleOf;
+    mm_camera_app_buf_t s_bufs[MM_CAMERA_MAX_NUM_FRAMES];
+    mm_camera_app_buf_t s_info_buf;
+} mm_camera_stream_t;
+
+typedef struct {
+    uint32_t ch_id;
+    uint8_t num_streams;
+    mm_camera_stream_t streams[MAX_STREAM_NUM_IN_BUNDLE];
+} mm_camera_channel_t;
+
+typedef void (*release_data_fn)(void* data, void *user_data);
+
+typedef struct {
+    struct cam_list list;
+    void* data;
+} camera_q_node;
+
+typedef struct {
+    camera_q_node m_head;
+    int m_size;
+    pthread_mutex_t m_lock;
+    release_data_fn m_dataFn;
+    void * m_userData;
+} mm_camera_queue_t;
+
+typedef struct {
+    uint16_t user_input_display_width;
+    uint16_t user_input_display_height;
+} USER_INPUT_DISPLAY_T;
+
+typedef struct {
+    mm_camera_vtbl_t *cam;
+    uint8_t num_channels;
+    mm_camera_channel_t channels[MM_CHANNEL_TYPE_MAX];
+    mm_jpeg_ops_t jpeg_ops;
+    uint32_t jpeg_hdl;
+    mm_camera_app_buf_t cap_buf;
+    mm_camera_app_buf_t parm_buf;
+
+    uint32_t current_jpeg_sess_id;
+    mm_camera_super_buf_t* current_job_frames;
+    uint32_t current_job_id;
+    mm_camera_app_buf_t jpeg_buf;
+
+    int fb_fd;
+    struct fb_var_screeninfo vinfo;
+    struct mdp_overlay data_overlay;
+    uint32_t slice_size;
+    uint32_t buffer_width, buffer_height;
+    uint32_t buffer_size;
+    cam_format_t buffer_format;
+    uint32_t frame_size;
+    uint32_t frame_count;
+    int encodeJpeg;
+    int zsl_enabled;
+    int8_t focus_supported;
+    prev_callback user_preview_cb;
+    parm_buffer_t *params_buffer;
+    USER_INPUT_DISPLAY_T preview_resolution;
+
+    //Reprocess params&stream
+    int8_t enable_reproc;
+    int32_t reproc_sharpness;
+    cam_denoise_param_t reproc_wnr;
+    int8_t enable_CAC;
+    mm_camera_queue_t pp_frames;
+    mm_camera_stream_t *reproc_stream;
+    metadata_buffer_t *metadata;
+    int8_t is_chromatix_reload;
+    tune_chromatix_t tune_data;
+} mm_camera_test_obj_t;
+
+typedef struct {
+  void *ptr;
+  void* ptr_jpeg;
+
+  uint8_t (*get_num_of_cameras) ();
+  int32_t (*mm_camera_open) (uint8_t camera_idx, mm_camera_vtbl_t **camera_vtbl);
+  uint32_t (*jpeg_open) (mm_jpeg_ops_t *ops, mm_dimension picture_size);
+
+} hal_interface_lib_t;
+
+typedef struct {
+    uint8_t num_cameras;
+    hal_interface_lib_t hal_lib;
+} mm_camera_app_t;
+
+typedef struct {
+    uint32_t width;
+    uint32_t height;
+} mm_camera_lib_snapshot_params;
+
+typedef enum {
+    MM_CAMERA_LIB_NO_ACTION = 0,
+    MM_CAMERA_LIB_RAW_CAPTURE,
+    MM_CAMERA_LIB_JPEG_CAPTURE,
+    MM_CAMERA_LIB_SET_FOCUS_MODE,
+    MM_CAMERA_LIB_DO_AF,
+    MM_CAMERA_LIB_CANCEL_AF,
+    MM_CAMERA_LIB_LOCK_AE,
+    MM_CAMERA_LIB_UNLOCK_AE,
+    MM_CAMERA_LIB_LOCK_AWB,
+    MM_CAMERA_LIB_UNLOCK_AWB,
+    MM_CAMERA_LIB_GET_CHROMATIX,
+    MM_CAMERA_LIB_SET_RELOAD_CHROMATIX,
+    MM_CAMERA_LIB_GET_AFTUNE,
+    MM_CAMERA_LIB_SET_RELOAD_AFTUNE,
+    MM_CAMERA_LIB_SET_AUTOFOCUS_TUNING,
+    MM_CAMERA_LIB_ZSL_ENABLE,
+    MM_CAMERA_LIB_EV,
+    MM_CAMERA_LIB_ANTIBANDING,
+    MM_CAMERA_LIB_SET_VFE_COMMAND,
+    MM_CAMERA_LIB_SET_POSTPROC_COMMAND,
+    MM_CAMERA_LIB_SET_3A_COMMAND,
+    MM_CAMERA_LIB_AEC_ENABLE,
+    MM_CAMERA_LIB_AEC_DISABLE,
+    MM_CAMERA_LIB_AF_ENABLE,
+    MM_CAMERA_LIB_AF_DISABLE,
+    MM_CAMERA_LIB_AWB_ENABLE,
+    MM_CAMERA_LIB_AWB_DISABLE,
+    MM_CAMERA_LIB_AEC_FORCE_LC,
+    MM_CAMERA_LIB_AEC_FORCE_GAIN,
+    MM_CAMERA_LIB_AEC_FORCE_EXP,
+    MM_CAMERA_LIB_AEC_FORCE_SNAP_LC,
+    MM_CAMERA_LIB_AEC_FORCE_SNAP_GAIN,
+    MM_CAMERA_LIB_AEC_FORCE_SNAP_EXP,
+    MM_CAMERA_LIB_WB,
+    MM_CAMERA_LIB_EXPOSURE_METERING,
+    MM_CAMERA_LIB_BRIGHTNESS,
+    MM_CAMERA_LIB_CONTRAST,
+    MM_CAMERA_LIB_SATURATION,
+    MM_CAMERA_LIB_SHARPNESS,
+    MM_CAMERA_LIB_ISO,
+    MM_CAMERA_LIB_ZOOM,
+    MM_CAMERA_LIB_BESTSHOT,
+    MM_CAMERA_LIB_FLASH,
+    MM_CAMERA_LIB_FPS_RANGE,
+    MM_CAMERA_LIB_WNR_ENABLE,
+    MM_CAMERA_LIB_SET_TINTLESS,
+} mm_camera_lib_commands;
+
+typedef struct {
+    int32_t stream_width, stream_height;
+    cam_focus_mode_type af_mode;
+} mm_camera_lib_params;
+
+typedef struct {
+  tuneserver_protocol_t *proto;
+  int clientsocket_id;
+  prserver_protocol_t *pr_proto;
+  int pr_clientsocket_id;
+  mm_camera_tuning_lib_params_t tuning_params;
+} tuningserver_t;
+
+typedef struct {
+    mm_camera_app_t app_ctx;
+    mm_camera_test_obj_t test_obj;
+    mm_camera_lib_params current_params;
+    int stream_running;
+    tuningserver_t tsctrl;
+} mm_camera_lib_ctx;
+
+typedef mm_camera_lib_ctx mm_camera_lib_handle;
+
+typedef int (*mm_app_test_t) (mm_camera_app_t *cam_apps);
+typedef struct {
+    mm_app_test_t f;
+    int r;
+} mm_app_tc_t;
+
+extern int mm_app_unit_test_entry(mm_camera_app_t *cam_app);
+extern int mm_app_dual_test_entry(mm_camera_app_t *cam_app);
+extern void mm_app_dump_frame(mm_camera_buf_def_t *frame,
+                              char *name,
+                              char *ext,
+                              uint32_t frame_idx);
+extern void mm_app_dump_jpeg_frame(const void * data,
+                                   size_t size,
+                                   char* name,
+                                   char* ext,
+                                   uint32_t index);
+extern int mm_camera_app_timedwait(uint8_t seconds);
+extern int mm_camera_app_wait();
+extern void mm_camera_app_done();
+extern int mm_app_alloc_bufs(mm_camera_app_buf_t* app_bufs,
+                             cam_frame_len_offset_t *frame_offset_info,
+                             uint8_t num_bufs,
+                             uint8_t is_streambuf,
+                             size_t multipleOf);
+extern int mm_app_release_bufs(uint8_t num_bufs,
+                               mm_camera_app_buf_t* app_bufs);
+extern int mm_app_stream_initbuf(cam_frame_len_offset_t *frame_offset_info,
+                                 uint8_t *num_bufs,
+                                 uint8_t **initial_reg_flag,
+                                 mm_camera_buf_def_t **bufs,
+                                 mm_camera_map_unmap_ops_tbl_t *ops_tbl,
+                                 void *user_data);
+extern int32_t mm_app_stream_deinitbuf(mm_camera_map_unmap_ops_tbl_t *ops_tbl,
+                                       void *user_data);
+extern int mm_app_cache_ops(mm_camera_app_meminfo_t *mem_info, int cmd);
+extern int32_t mm_app_stream_clean_invalidate_buf(uint32_t index, void *user_data);
+extern int32_t mm_app_stream_invalidate_buf(uint32_t index, void *user_data);
+extern int mm_app_open(mm_camera_app_t *cam_app,
+                       int cam_id,
+                       mm_camera_test_obj_t *test_obj);
+extern int mm_app_close(mm_camera_test_obj_t *test_obj);
+extern mm_camera_channel_t * mm_app_add_channel(
+                                         mm_camera_test_obj_t *test_obj,
+                                         mm_camera_channel_type_t ch_type,
+                                         mm_camera_channel_attr_t *attr,
+                                         mm_camera_buf_notify_t channel_cb,
+                                         void *userdata);
+extern int mm_app_del_channel(mm_camera_test_obj_t *test_obj,
+                              mm_camera_channel_t *channel);
+extern mm_camera_stream_t * mm_app_add_stream(mm_camera_test_obj_t *test_obj,
+                                              mm_camera_channel_t *channel);
+extern int mm_app_del_stream(mm_camera_test_obj_t *test_obj,
+                             mm_camera_channel_t *channel,
+                             mm_camera_stream_t *stream);
+extern int mm_app_config_stream(mm_camera_test_obj_t *test_obj,
+                                mm_camera_channel_t *channel,
+                                mm_camera_stream_t *stream,
+                                mm_camera_stream_config_t *config);
+extern int mm_app_start_channel(mm_camera_test_obj_t *test_obj,
+                                mm_camera_channel_t *channel);
+extern int mm_app_stop_channel(mm_camera_test_obj_t *test_obj,
+                               mm_camera_channel_t *channel);
+extern mm_camera_channel_t *mm_app_get_channel_by_type(
+                                    mm_camera_test_obj_t *test_obj,
+                                    mm_camera_channel_type_t ch_type);
+
+extern int mm_app_start_preview(mm_camera_test_obj_t *test_obj);
+extern int mm_app_stop_preview(mm_camera_test_obj_t *test_obj);
+extern int mm_app_start_preview_zsl(mm_camera_test_obj_t *test_obj);
+extern int mm_app_stop_preview_zsl(mm_camera_test_obj_t *test_obj);
+extern mm_camera_channel_t * mm_app_add_preview_channel(
+                                mm_camera_test_obj_t *test_obj);
+extern mm_camera_stream_t * mm_app_add_raw_stream(mm_camera_test_obj_t *test_obj,
+                                                mm_camera_channel_t *channel,
+                                                mm_camera_buf_notify_t stream_cb,
+                                                void *userdata,
+                                                uint8_t num_bufs,
+                                                uint8_t num_burst);
+extern int mm_app_stop_and_del_channel(mm_camera_test_obj_t *test_obj,
+                                       mm_camera_channel_t *channel);
+extern mm_camera_channel_t * mm_app_add_snapshot_channel(
+                                               mm_camera_test_obj_t *test_obj);
+extern mm_camera_stream_t * mm_app_add_snapshot_stream(
+                                                mm_camera_test_obj_t *test_obj,
+                                                mm_camera_channel_t *channel,
+                                                mm_camera_buf_notify_t stream_cb,
+                                                void *userdata,
+                                                uint8_t num_bufs,
+                                                uint8_t num_burst);
+extern mm_camera_stream_t * mm_app_add_metadata_stream(mm_camera_test_obj_t *test_obj,
+                                               mm_camera_channel_t *channel,
+                                               mm_camera_buf_notify_t stream_cb,
+                                               void *userdata,
+                                               uint8_t num_bufs);
+extern int mm_app_start_record_preview(mm_camera_test_obj_t *test_obj);
+extern int mm_app_stop_record_preview(mm_camera_test_obj_t *test_obj);
+extern int mm_app_start_record(mm_camera_test_obj_t *test_obj);
+extern int mm_app_stop_record(mm_camera_test_obj_t *test_obj);
+extern int mm_app_start_live_snapshot(mm_camera_test_obj_t *test_obj);
+extern int mm_app_stop_live_snapshot(mm_camera_test_obj_t *test_obj);
+extern int mm_app_start_capture(mm_camera_test_obj_t *test_obj,
+                                uint8_t num_snapshots);
+extern int mm_app_stop_capture(mm_camera_test_obj_t *test_obj);
+extern int mm_app_start_capture_raw(mm_camera_test_obj_t *test_obj,
+                                    uint8_t num_snapshots);
+extern int mm_app_stop_capture_raw(mm_camera_test_obj_t *test_obj);
+extern int mm_app_start_rdi(mm_camera_test_obj_t *test_obj, uint8_t num_burst);
+extern int mm_app_stop_rdi(mm_camera_test_obj_t *test_obj);
+extern int mm_app_initialize_fb(mm_camera_test_obj_t *test_obj);
+extern int mm_app_close_fb(mm_camera_test_obj_t *test_obj);
+extern int mm_app_fb_write(mm_camera_test_obj_t *test_obj, char *buffer);
+extern int mm_app_overlay_display(mm_camera_test_obj_t *test_obj, int bufferFd);
+extern int mm_app_allocate_ion_memory(mm_camera_app_buf_t *buf, unsigned int ion_type);
+extern int mm_app_deallocate_ion_memory(mm_camera_app_buf_t *buf);
+extern int mm_app_set_params(mm_camera_test_obj_t *test_obj,
+                      cam_intf_parm_type_t param_type,
+                      int32_t value);
+extern int mm_app_set_preview_fps_range(mm_camera_test_obj_t *test_obj,
+                        cam_fps_range_t *fpsRange);
+/* JIG camera lib interface */
+
+int mm_camera_lib_open(mm_camera_lib_handle *handle, int cam_id);
+int mm_camera_lib_get_caps(mm_camera_lib_handle *handle,
+                           cam_capability_t *caps);
+int mm_camera_lib_start_stream(mm_camera_lib_handle *handle);
+int mm_camera_lib_send_command(mm_camera_lib_handle *handle,
+                               mm_camera_lib_commands cmd,
+                               void *data, void *out_data);
+int mm_camera_lib_stop_stream(mm_camera_lib_handle *handle);
+int mm_camera_lib_number_of_cameras(mm_camera_lib_handle *handle);
+int mm_camera_lib_close(mm_camera_lib_handle *handle);
+int32_t mm_camera_load_tuninglibrary(
+  mm_camera_tuning_lib_params_t *tuning_param);
+int mm_camera_lib_set_preview_usercb(
+  mm_camera_lib_handle *handle, prev_callback cb);
+//
+
+int mm_app_start_regression_test(int run_tc);
+int mm_app_load_hal(mm_camera_app_t *my_cam_app);
+
+extern int createEncodingSession(mm_camera_test_obj_t *test_obj,
+                          mm_camera_stream_t *m_stream,
+                          mm_camera_buf_def_t *m_frame);
+extern int encodeData(mm_camera_test_obj_t *test_obj, mm_camera_super_buf_t* recvd_frame,
+               mm_camera_stream_t *m_stream);
+extern int mm_app_take_picture(mm_camera_test_obj_t *test_obj, uint8_t);
+
+extern mm_camera_channel_t * mm_app_add_reprocess_channel(mm_camera_test_obj_t *test_obj,
+                                                   mm_camera_stream_t *source_stream);
+extern int mm_app_start_reprocess(mm_camera_test_obj_t *test_obj);
+extern int mm_app_stop_reprocess(mm_camera_test_obj_t *test_obj);
+extern int mm_app_do_reprocess(mm_camera_test_obj_t *test_obj,
+        mm_camera_buf_def_t *frame,
+        uint32_t meta_idx,
+        mm_camera_super_buf_t *super_buf,
+        mm_camera_stream_t *src_meta);
+extern void mm_app_release_ppinput(void *data, void *user_data);
+
+extern int mm_camera_queue_init(mm_camera_queue_t *queue,
+                         release_data_fn data_rel_fn,
+                         void *user_data);
+extern int mm_qcamera_queue_release(mm_camera_queue_t *queue);
+extern int mm_qcamera_queue_isempty(mm_camera_queue_t *queue);
+extern int mm_qcamera_queue_enqueue(mm_camera_queue_t *queue, void *data);
+extern void* mm_qcamera_queue_dequeue(mm_camera_queue_t *queue,
+                                      int bFromHead);
+extern void mm_qcamera_queue_flush(mm_camera_queue_t *queue);
+
+#endif /* __MM_QCAMERA_APP_H__ */
+
+
+
+
+
+
+
+
+
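The mm_camera_lib_* prototypes above are the test app's external entry points. A hedged usage sketch built only from these declarations follows; whether MM_CAMERA_LIB_JPEG_CAPTURE consumes mm_camera_lib_snapshot_params as its data argument is an assumption, and error handling is trimmed:

    #include "mm_qcamera_app.h"

    /* Sketch only: open camera 0, capture one JPEG, tear down. */
    static int take_one_jpeg(void)
    {
        mm_camera_lib_handle handle;
        cam_capability_t caps;
        mm_camera_lib_snapshot_params dim = {
            DEFAULT_SNAPSHOT_WIDTH, DEFAULT_SNAPSHOT_HEIGHT
        };
        int rc = mm_camera_lib_open(&handle, 0);

        if (rc != MM_CAMERA_OK)
            return rc;
        if (mm_camera_lib_get_caps(&handle, &caps) == MM_CAMERA_OK &&
            mm_camera_lib_start_stream(&handle) == MM_CAMERA_OK) {
            /* data/out_data usage here is assumed, not stated by this header */
            mm_camera_lib_send_command(&handle, MM_CAMERA_LIB_JPEG_CAPTURE,
                                       &dim, NULL);
            mm_camera_lib_stop_stream(&handle);
        }
        return mm_camera_lib_close(&handle);
    }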
diff --git a/camera/QCamera2/stack/mm-camera-test/inc/mm_qcamera_commands.h b/camera/QCamera2/stack/mm-camera-test/inc/mm_qcamera_commands.h
new file mode 100644
index 0000000..1540c20
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-test/inc/mm_qcamera_commands.h
@@ -0,0 +1,67 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MM_QCAMERA_COMMANDS_H__
+#define __MM_QCAMERA_COMMANDS_H__
+
+#include "mm_qcamera_socket.h"
+#include "mm_qcamera_app.h"
+
+int tuneserver_close_cam(mm_camera_lib_handle *lib_handle);
+int tuneserver_stop_cam(mm_camera_lib_handle *lib_handle);
+int tuneserver_open_cam(mm_camera_lib_handle *lib_handle);
+
+int tuneserver_initialize_tuningp(void * ctrl, int client_socket_id,
+  char *send_buf, uint32_t send_len);
+int tuneserver_deinitialize_tuningp(void * ctrl, int client_socket_id,
+  char *send_buf, uint32_t send_len);
+int tuneserver_process_get_list_cmd(void * ctrl, void *recv_cmd,
+  char *send_buf, uint32_t send_len);
+int tuneserver_process_misc_cmd(void * ctrl, void *recv_cmd,
+  char *send_buf, uint32_t send_len);
+int tuneserver_process_get_params_cmd(void * ctrl, void *recv_cmd,
+  char *send_buf, uint32_t send_len);
+int tuneserver_process_set_params_cmd(void * ctrl, void *recv_cmd,
+  char *send_buf, uint32_t send_len);
+
+int tuneserver_initialize_prevtuningp(void * ctrl,
+  int pr_client_socket_id, cam_dimension_t dimension,
+  char **send_buf, uint32_t *send_len);
+int tuneserver_deinitialize_prevtuningp(void * ctrl,
+  char **send_buf, uint32_t *send_len);
+int tuneserver_preview_getinfo(void * ctrl,
+  char **send_buf, uint32_t *send_len);
+int tuneserver_preview_getchunksize(void * ctrl,
+  char **send_buf, uint32_t *send_len);
+int tuneserver_preview_getframe(void * ctrl,
+  char **send_buf, uint32_t *send_len);
+int tuneserver_preview_unsupported(void * ctrl,
+  char **send_buf, uint32_t *send_len);
+
+#endif /*__MM_QCAMERA_COMMANDS_H__*/
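These handlers line up by name with the mm_camera_tune_cmd_t values declared in mm_qcamera_app.h. A hedged dispatch sketch follows; the enum-to-handler pairing and passing the lib handle as the opaque ctrl pointer are inferred from the names, not stated by this header:

    #include "mm_qcamera_commands.h"

    /* Illustrative only; the real wiring lives in mm_qcamera_commands.c. */
    static int dispatch_tune_cmd(mm_camera_lib_handle *lib,
                                 mm_camera_tune_cmd_t cmd, void *recv_cmd,
                                 char *send_buf, uint32_t send_len)
    {
        switch (cmd) {
        case TUNE_CMD_GET_LIST:
            return tuneserver_process_get_list_cmd(lib, recv_cmd, send_buf, send_len);
        case TUNE_CMD_GET_PARAMS:
            return tuneserver_process_get_params_cmd(lib, recv_cmd, send_buf, send_len);
        case TUNE_CMD_SET_PARAMS:
            return tuneserver_process_set_params_cmd(lib, recv_cmd, send_buf, send_len);
        case TUNE_CMD_MISC:
            return tuneserver_process_misc_cmd(lib, recv_cmd, send_buf, send_len);
        default:
            return -1;
        }
    }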
diff --git a/camera/QCamera2/stack/mm-camera-test/inc/mm_qcamera_dbg.h b/camera/QCamera2/stack/mm-camera-test/inc/mm_qcamera_dbg.h
new file mode 100755
index 0000000..bb5ab81
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-test/inc/mm_qcamera_dbg.h
@@ -0,0 +1,71 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MM_QCAMERA_DBG_H__
+#define __MM_QCAMERA_DBG_H__
+
+//#define LOG_DEBUG 1
+
+#ifndef LOG_DEBUG
+  #ifdef _ANDROID_
+    #undef LOG_NIDEBUG
+    #undef LOG_TAG
+    #define LOG_NIDEBUG 0
+    #define LOG_TAG "mm-camera-test"
+    #include <utils/Log.h>
+  #else
+    #include <stdio.h>
+    #define ALOGE CDBG
+  #endif
+  #undef CDBG
+  #define CDBG(fmt, args...) do{}while(0)
+  #define CDBG_ERROR(fmt, args...) ALOGE(fmt, ##args)
+#else
+  #ifdef _ANDROID_
+    #undef LOG_NIDEBUG
+    #undef LOG_TAG
+    #define LOG_NIDEBUG 0
+    #define LOG_TAG "mm-camera-test"
+    #include <utils/Log.h>
+    #define CDBG(fmt, args...) ALOGE(fmt, ##args)
+  #else
+    #include <stdio.h>
+    #define CDBG(fmt, args...) fprintf(stderr, fmt, ##args)
+    #define ALOGE(fmt, args...) fprintf(stderr, fmt, ##args)
+  #endif
+#endif
+
+#ifdef _ANDROID_
+  #define CDBG_HIGH(fmt, args...)  ALOGE(fmt, ##args)
+  #define CDBG_ERROR(fmt, args...)  ALOGE(fmt, ##args)
+#else
+  #define CDBG_HIGH(fmt, args...) fprintf(stderr, fmt, ##args)
+  #define CDBG_ERROR(fmt, args...) fprintf(stderr, fmt, ##args)
+#endif
+#endif /* __MM_QCAMERA_DBG_H__ */
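With LOG_DEBUG left commented out (the default above), CDBG() compiles to a no-op and only CDBG_HIGH()/CDBG_ERROR() reach the log; defining LOG_DEBUG turns CDBG() into ALOGE()/fprintf(). A minimal call-site sketch:

    #include "mm_qcamera_dbg.h"

    static void log_example(int num_cameras)
    {
        CDBG("%s: num_cameras = %d\n", __func__, num_cameras);  /* silent unless LOG_DEBUG */
        CDBG_ERROR("%s: no cameras detected\n", __func__);      /* always emitted */
    }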
diff --git a/camera/QCamera2/stack/mm-camera-test/inc/mm_qcamera_main_menu.h b/camera/QCamera2/stack/mm-camera-test/inc/mm_qcamera_main_menu.h
new file mode 100644
index 0000000..06e3827
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-test/inc/mm_qcamera_main_menu.h
@@ -0,0 +1,438 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MM_QCAMERA_MAIN_MENU_H__
+#define __MM_QCAMERA_MAIN_MENU_H__
+
+#include "mm_camera_interface.h"
+#include "mm_jpeg_interface.h"
+
+#define VIDEO_BUFFER_SIZE       (PREVIEW_WIDTH * PREVIEW_HEIGHT * 3/2)
+#define THUMBNAIL_BUFFER_SIZE   (THUMBNAIL_WIDTH * THUMBNAIL_HEIGHT * 3/2)
+#define SNAPSHOT_BUFFER_SIZE    (PICTURE_WIDTH * PICTURE_HEIGHT * 3/2)
+
+/*===========================================================================
+ * Macro
+ *===========================================================================*/
+#define PREVIEW_FRAMES_NUM    5
+#define VIDEO_FRAMES_NUM      5
+#define THUMBNAIL_FRAMES_NUM  1
+#define SNAPSHOT_FRAMES_NUM   1
+#define MAX_NUM_FORMAT        32
+
+typedef enum
+{
+  START_PREVIEW,
+  STOP_PREVIEW,
+  SET_WHITE_BALANCE,
+  SET_TINTLESS_ENABLE,
+  SET_TINTLESS_DISABLE,
+  SET_EXP_METERING,
+  GET_CTRL_VALUE,
+  TOGGLE_AFR,
+  SET_ISO,
+  BRIGHTNESS_GOTO_SUBMENU,
+  CONTRAST_GOTO_SUBMENU,
+  EV_GOTO_SUBMENU,
+  SATURATION_GOTO_SUBMENU,
+  SET_ZOOM,
+  SET_SHARPNESS,
+  TAKE_JPEG_SNAPSHOT,
+  START_RECORDING,
+  STOP_RECORDING,
+  BEST_SHOT,
+  LIVE_SHOT,
+  FLASH_MODES,
+  TOGGLE_ZSL,
+  TAKE_RAW_SNAPSHOT,
+  SWITCH_SNAP_RESOLUTION,
+  TOGGLE_WNR,
+  EXIT
+} Camera_main_menu_t;
+
+typedef enum
+{
+  ACTION_NO_ACTION,
+  ACTION_START_PREVIEW,
+  ACTION_STOP_PREVIEW,
+  ACTION_SET_WHITE_BALANCE,
+  ACTION_SET_TINTLESS_ENABLE,
+  ACTION_SET_TINTLESS_DISABLE,
+  ACTION_SET_EXP_METERING,
+  ACTION_GET_CTRL_VALUE,
+  ACTION_TOGGLE_AFR,
+  ACTION_SET_ISO,
+  ACTION_BRIGHTNESS_INCREASE,
+  ACTION_BRIGHTNESS_DECREASE,
+  ACTION_CONTRAST_INCREASE,
+  ACTION_CONTRAST_DECREASE,
+  ACTION_EV_INCREASE,
+  ACTION_EV_DECREASE,
+  ACTION_SATURATION_INCREASE,
+  ACTION_SATURATION_DECREASE,
+  ACTION_SET_ZOOM,
+  ACTION_SHARPNESS_INCREASE,
+  ACTION_SHARPNESS_DECREASE,
+  ACTION_TAKE_JPEG_SNAPSHOT,
+  ACTION_START_RECORDING,
+  ACTION_STOP_RECORDING,
+  ACTION_SET_BESTSHOT_MODE,
+  ACTION_TAKE_LIVE_SNAPSHOT,
+  ACTION_SET_FLASH_MODE,
+  ACTION_SWITCH_CAMERA,
+  ACTION_TOGGLE_ZSL,
+  ACTION_TAKE_RAW_SNAPSHOT,
+  ACTION_SWITCH_RESOLUTION,
+  ACTION_TOGGLE_WNR,
+  ACTION_EXIT
+} camera_action_t;
+
+#define INVALID_KEY_PRESS 0
+#define BASE_OFFSET  ('Z' - 'A' + 1)
+#define BASE_OFFSET_NUM  ('Z' - 'A' + 2)
+#define PAD_TO_WORD(a)  (((a)+3)&~3)
+
+
+#define SQCIF_WIDTH     128
+#define SQCIF_HEIGHT     96
+#define QCIF_WIDTH      176
+#define QCIF_HEIGHT     144
+#define QVGA_WIDTH      320
+#define QVGA_HEIGHT     240
+#define HD_THUMBNAIL_WIDTH      256
+#define HD_THUMBNAIL_HEIGHT     144
+#define CIF_WIDTH       352
+#define CIF_HEIGHT      288
+#define VGA_WIDTH       640
+#define VGA_HEIGHT      480
+#define WVGA_WIDTH      800
+#define WVGA_HEIGHT     480
+#define WVGA_PLUS_WIDTH      960
+#define WVGA_PLUS_HEIGHT     720
+
+#define MP1_WIDTH      1280
+#define MP1_HEIGHT      960
+#define MP2_WIDTH      1600
+#define MP2_HEIGHT     1200
+#define MP3_WIDTH      2048
+#define MP3_HEIGHT     1536
+#define MP5_WIDTH      2592
+#define MP5_HEIGHT     1944
+#define MP8_WIDTH      3264
+#define MP8_HEIGHT     2448
+#define MP12_WIDTH     4000
+#define MP12_HEIGHT    3000
+
+#define SVGA_WIDTH      800
+#define SVGA_HEIGHT     600
+#define XGA_WIDTH      1024
+#define XGA_HEIGHT      768
+#define HD720_WIDTH    1280
+#define HD720_HEIGHT    720
+#define HD720_PLUS_WIDTH    1440
+#define HD720_PLUS_HEIGHT   1080
+#define WXGA_WIDTH     1280
+#define WXGA_HEIGHT     768
+#define HD1080_WIDTH   1920
+#define HD1080_HEIGHT  1080
+
+
+#define ONEMP_WIDTH    1280
+#define SXGA_WIDTH     1280
+#define UXGA_WIDTH     1600
+#define QXGA_WIDTH     2048
+#define FIVEMP_WIDTH   2560
+
+
+#define ONEMP_HEIGHT    960
+#define SXGA_HEIGHT     1024
+#define UXGA_HEIGHT     1200
+#define QXGA_HEIGHT     1536
+#define FIVEMP_HEIGHT   1920
+
+
+typedef enum
+{
+  RESOLUTION_MIN,
+  QCIF                  = RESOLUTION_MIN,
+  QVGA,
+  VGA,
+  WVGA,
+  WVGA_PLUS ,
+  HD720,
+  HD720_PLUS,
+  HD1080,
+  RESOLUTION_PREVIEW_VIDEO_MAX = HD1080,
+  WXGA,
+  MP1,
+  MP2,
+  MP3,
+  MP5,
+  MP8,
+  MP12,
+  RESOLUTION_MAX         = MP12,
+} Camera_Resolution;
+
+typedef struct{
+    uint16_t width;
+    uint16_t  height;
+    char * name;
+    char * str_name;
+    int supported;
+} DIMENSION_TBL_T;
+
+typedef enum {
+    WHITE_BALANCE_STATE,
+    WHITE_BALANCE_TEMPERATURE,
+    BRIGHTNESS_CTRL,
+    EV,
+    CONTRAST_CTRL,
+    SATURATION_CTRL,
+    SHARPNESS_CTRL
+} Get_Ctrl_modes;
+
+typedef enum {
+    AUTO_EXP_FRAME_AVG,
+    AUTO_EXP_CENTER_WEIGHTED,
+    AUTO_EXP_SPOT_METERING,
+    AUTO_EXP_SMART_METERING,
+    AUTO_EXP_USER_METERING,
+    AUTO_EXP_SPOT_METERING_ADV,
+    AUTO_EXP_CENTER_WEIGHTED_ADV,
+    AUTO_EXP_MAX
+} Exp_Metering_modes;
+
+typedef enum {
+  ISO_AUTO,
+  ISO_DEBLUR,
+  ISO_100,
+  ISO_200,
+  ISO_400,
+  ISO_800,
+  ISO_1600,
+  ISO_MAX
+} ISO_modes;
+
+typedef enum {
+  BESTSHOT_AUTO,
+  BESTSHOT_ACTION,
+  BESTSHOT_PORTRAIT,
+  BESTSHOT_LANDSCAPE,
+  BESTSHOT_NIGHT,
+  BESTSHOT_NIGHT_PORTRAIT,
+  BESTSHOT_THEATRE,
+  BESTSHOT_BEACH,
+  BESTSHOT_SNOW,
+  BESTSHOT_SUNSET,
+  BESTSHOT_ANTISHAKE,
+  BESTSHOT_FIREWORKS,
+  BESTSHOT_SPORTS,
+  BESTSHOT_PARTY,
+  BESTSHOT_CANDLELIGHT,
+  BESTSHOT_ASD,
+  BESTSHOT_BACKLIGHT,
+  BESTSHOT_FLOWERS,
+  BESTSHOT_AR,
+  BESTSHOT_HDR,
+  BESTSHOT_MAX
+}Bestshot_modes;
+
+typedef enum {
+    FLASH_MODE_OFF,
+    FLASH_MODE_AUTO,
+    FLASH_MODE_ON,
+    FLASH_MODE_TORCH,
+    FLASH_MODE_MAX,
+}Flash_modes;
+
+typedef enum {
+  WB_AUTO,
+  WB_INCANDESCENT,
+  WB_FLUORESCENT,
+  WB_WARM_FLUORESCENT,
+  WB_DAYLIGHT,
+  WB_CLOUDY_DAYLIGHT,
+  WB_TWILIGHT,
+  WB_SHADE,
+  WB_MAX
+} White_Balance_modes;
+
+typedef enum
+{
+  MENU_ID_MAIN,
+  MENU_ID_WHITEBALANCECHANGE,
+  MENU_ID_EXPMETERINGCHANGE,
+  MENU_ID_GET_CTRL_VALUE,
+  MENU_ID_TOGGLEAFR,
+  MENU_ID_ISOCHANGE,
+  MENU_ID_BRIGHTNESSCHANGE,
+  MENU_ID_CONTRASTCHANGE,
+  MENU_ID_EVCHANGE,
+  MENU_ID_SATURATIONCHANGE,
+  MENU_ID_ZOOMCHANGE,
+  MENU_ID_SHARPNESSCHANGE,
+  MENU_ID_BESTSHOT,
+  MENU_ID_FLASHMODE,
+  MENU_ID_SENSORS,
+  MENU_ID_SWITCH_RES,
+  MENU_ID_INVALID,
+} menu_id_change_t;
+
+typedef enum
+{
+  DECREASE_ZOOM,
+  INCREASE_ZOOM,
+  INCREASE_STEP_ZOOM,
+  DECREASE_STEP_ZOOM,
+} Camera_Zoom;
+
+typedef enum
+{
+  INC_CONTRAST,
+  DEC_CONTRAST,
+} Camera_Contrast_changes;
+
+typedef enum
+{
+  INC_BRIGHTNESS,
+  DEC_BRIGHTNESS,
+} Camera_Brightness_changes;
+
+typedef enum
+{
+  INCREASE_EV,
+  DECREASE_EV,
+} Camera_EV_changes;
+
+typedef enum {
+  INC_SATURATION,
+  DEC_SATURATION,
+} Camera_Saturation_changes;
+
+typedef enum
+{
+  INC_ISO,
+  DEC_ISO,
+} Camera_ISO_changes;
+
+typedef enum
+{
+  INC_SHARPNESS,
+  DEC_SHARPNESS,
+} Camera_Sharpness_changes;
+
+typedef enum {
+  ZOOM_IN,
+  ZOOM_OUT,
+} Zoom_direction;
+
+typedef struct{
+    Camera_main_menu_t main_menu;
+    char * menu_name;
+} CAMERA_MAIN_MENU_TBL_T;
+
+typedef struct{
+    char * menu_name;
+    int present;
+} CAMERA_SENSOR_MENU_TLB_T;
+
+typedef struct{
+    Camera_Resolution cs_id;
+    uint16_t width;
+    uint16_t  height;
+    char * name;
+    char * str_name;
+} PREVIEW_DIMENSION_TBL_T;
+
+typedef struct {
+  White_Balance_modes wb_id;
+  char * wb_name;
+} WHITE_BALANCE_TBL_T;
+
+typedef struct {
+  Get_Ctrl_modes get_ctrl_id;
+  char * get_ctrl_name;
+} GET_CTRL_TBL_T;
+
+typedef struct{
+  Exp_Metering_modes exp_metering_id;
+  char * exp_metering_name;
+} EXP_METERING_TBL_T;
+
+typedef struct {
+  Bestshot_modes bs_id;
+  char *name;
+} BESTSHOT_MODE_TBT_T;
+
+typedef struct {
+  Flash_modes bs_id;
+  char *name;
+} FLASH_MODE_TBL_T;
+
+typedef struct {
+  ISO_modes iso_modes;
+  char *iso_modes_name;
+} ISO_TBL_T;
+
+typedef struct {
+  Zoom_direction zoom_direction;
+  char * zoom_direction_name;
+} ZOOM_TBL_T;
+
+typedef struct {
+  Camera_Sharpness_changes sharpness_change;
+  char *sharpness_change_name;
+} SHARPNESS_TBL_T;
+
+typedef struct {
+  Camera_Brightness_changes bc_id;
+  char * brightness_name;
+} CAMERA_BRIGHTNESS_TBL_T;
+
+typedef struct {
+  Camera_Contrast_changes cc_id;
+  char * contrast_name;
+} CAMERA_CONTRST_TBL_T;
+
+typedef struct {
+  Camera_EV_changes ec_id;
+  char * EV_name;
+} CAMERA_EV_TBL_T;
+
+typedef struct {
+  Camera_Saturation_changes sc_id;
+  char * saturation_name;
+} CAMERA_SATURATION_TBL_T;
+
+typedef struct {
+  Camera_Sharpness_changes bc_id;
+  char * sharpness_name;
+} CAMERA_SHARPNESS_TBL_T;
+
+#endif /* __MM_QCAMERA_MAIN_MENU_H__ */
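Camera_Resolution and the dimension macros above are meant to be tied together through the *_TBL_T structs. A hedged sketch of such a table (the real tables live in mm_qcamera_main_menu.c and may differ in entries and naming):

    #include "mm_qcamera_main_menu.h"

    /* Illustrative pairing only. */
    static PREVIEW_DIMENSION_TBL_T preview_dim_tbl[] = {
        { QVGA,   QVGA_WIDTH,   QVGA_HEIGHT,   "QVGA",   "320x240"   },
        { VGA,    VGA_WIDTH,    VGA_HEIGHT,    "VGA",    "640x480"   },
        { WVGA,   WVGA_WIDTH,   WVGA_HEIGHT,   "WVGA",   "800x480"   },
        { HD720,  HD720_WIDTH,  HD720_HEIGHT,  "HD720",  "1280x720"  },
        { HD1080, HD1080_WIDTH, HD1080_HEIGHT, "HD1080", "1920x1080" },
    };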
diff --git a/camera/QCamera2/stack/mm-camera-test/inc/mm_qcamera_socket.h b/camera/QCamera2/stack/mm-camera-test/inc/mm_qcamera_socket.h
new file mode 100644
index 0000000..d9bd71d
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-test/inc/mm_qcamera_socket.h
@@ -0,0 +1,129 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MM_QCAMERA_SOCKET_H__
+#define __MM_QCAMERA_SOCKET_H__
+
+#include <stdint.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <termios.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <signal.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <sys/mman.h>
+#include <sys/time.h>
+#include <linux/socket.h>
+#include <arpa/inet.h>
+#include <utils/Log.h>
+
+#undef __FD_SET
+#define __FD_SET(fd, fdsetp) \
+  (((fd_set *)(fdsetp))->fds_bits[(fd) >> 5] |= (1LU<<((fd) & 31)))
+
+#undef __FD_CLR
+#define __FD_CLR(fd, fdsetp) \
+  (((fd_set *)(fdsetp))->fds_bits[(fd) >> 5] &= ~(1LU<<((fd) & 31)))
+
+#undef  __FD_ISSET
+#define __FD_ISSET(fd, fdsetp) \
+  ((((fd_set *)(fdsetp))->fds_bits[(fd) >> 5] & (1LU<<((fd) & 31))) != 0)
+
+#undef  __FD_ZERO
+#define __FD_ZERO(fdsetp) \
+  (memset (fdsetp, 0, sizeof (*(fd_set *)(fdsetp))))
+
+#define TUNESERVER_MAX_RECV 2048
+#define TUNESERVER_MAX(a, b)  (((a) > (b)) ? (a) : (b))
+
+#define TUNESERVER_GET_LIST 1014
+#define TUNESERVER_GET_PARMS 1015
+#define TUNESERVER_SET_PARMS 1016
+#define TUNESERVER_MISC_CMDS 1021
+
+#define TUNE_PREV_GET_INFO        0x0001
+#define TUNE_PREV_CH_CNK_SIZE     0x0002
+#define TUNE_PREV_GET_PREV_FRAME  0x0003
+#define TUNE_PREV_GET_JPG_SNAP    0x0004
+#define TUNE_PREV_GET_RAW_SNAP    0x0005
+#define TUNE_PREV_GET_RAW_PREV    0x0006
+
+typedef struct {
+  char data[128];
+} tuneserver_misc_cmd;
+
+typedef enum {
+  TUNESERVER_RECV_COMMAND = 1,
+  TUNESERVER_RECV_PAYLOAD_SIZE,
+  TUNESERVER_RECV_PAYLOAD,
+  TUNESERVER_RECV_RESPONSE,
+  TUNESERVERER_RECV_INVALID,
+} tuneserver_recv_cmd_t;
+
+typedef struct {
+  uint16_t          current_cmd;
+  tuneserver_recv_cmd_t next_recv_code;
+  uint32_t          next_recv_len;
+  void              *recv_buf;
+  uint32_t          recv_len;
+  uint32_t          send_len;
+  void              *send_buf;
+} tuneserver_protocol_t;
+
+typedef enum {
+  TUNE_PREV_RECV_COMMAND = 1,
+  TUNE_PREV_RECV_NEWCNKSIZE,
+  TUNE_PREV_RECV_INVALID
+} tune_prev_cmd_t;
+
+typedef struct _eztune_preview_protocol_t {
+  uint16_t         current_cmd;
+  tune_prev_cmd_t  next_recv_code;
+  uint32_t         next_recv_len;
+  int32_t          send_len;
+  char*            send_buf;
+  uint32_t         send_buf_size;
+  uint32_t         new_cnk_size;
+  uint32_t         new_cmd_available;
+} prserver_protocol_t;
+
+typedef union {
+  struct sockaddr addr;
+  struct sockaddr_in addr_in;
+} mm_qcamera_sock_addr_t;
+
+int eztune_server_start(void *lib_handle);
+
+#endif /*__MM_QCAMERA_SOCKET_H__*/
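eztune_server_start() is the only entry point this header exports; the sockaddr union and the protocol state structs above imply a simple TCP listener feeding a per-command state machine. A hedged sketch of the bind/listen step (the port value is a placeholder, not taken from this patch):

    #include "mm_qcamera_socket.h"
    #include <sys/socket.h>

    static int tuneserver_listen(uint16_t port)   /* port value is assumed */
    {
        mm_qcamera_sock_addr_t addr;
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd < 0)
            return -1;
        memset(&addr, 0, sizeof(addr));
        addr.addr_in.sin_family = AF_INET;
        addr.addr_in.sin_port = htons(port);
        addr.addr_in.sin_addr.s_addr = htonl(INADDR_ANY);
        if (bind(fd, &addr.addr, sizeof(addr.addr_in)) < 0 || listen(fd, 1) < 0) {
            close(fd);
            return -1;
        }
        /* a caller would accept() and drive tuneserver_protocol_t /
         * prserver_protocol_t from the received bytes */
        return fd;
    }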
diff --git a/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_app.c b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_app.c
new file mode 100644
index 0000000..877d9ba
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_app.c
@@ -0,0 +1,2443 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <ctype.h>
+#include <cutils/properties.h>
+#include <fcntl.h>
+#include <dlfcn.h>
+#include <linux/msm_ion.h>
+#include <sys/mman.h>
+
+#include "mm_qcamera_dbg.h"
+#include "mm_qcamera_app.h"
+
+static pthread_mutex_t app_mutex;
+static int thread_status = 0;
+static pthread_cond_t app_cond_v;
+
+#define MM_QCAMERA_APP_NANOSEC_SCALE 1000000000
+
+int mm_camera_app_timedwait(uint8_t seconds)
+{
+    int rc = 0;
+    pthread_mutex_lock(&app_mutex);
+    if(FALSE == thread_status) {
+        struct timespec tw;
+        memset(&tw, 0, sizeof tw);
+        tw.tv_sec = time(NULL) + seconds;
+        tw.tv_nsec = 0;
+
+        rc = pthread_cond_timedwait(&app_cond_v, &app_mutex,&tw);
+        thread_status = FALSE;
+    }
+    pthread_mutex_unlock(&app_mutex);
+    return rc;
+}
+
+int mm_camera_app_wait()
+{
+    int rc = 0;
+    pthread_mutex_lock(&app_mutex);
+    if(FALSE == thread_status){
+        pthread_cond_wait(&app_cond_v, &app_mutex);
+    }
+    thread_status = FALSE;
+    pthread_mutex_unlock(&app_mutex);
+    return rc;
+}
+
+void mm_camera_app_done()
+{
+  pthread_mutex_lock(&app_mutex);
+  thread_status = TRUE;
+  pthread_cond_signal(&app_cond_v);
+  pthread_mutex_unlock(&app_mutex);
+}
+
+int mm_app_load_hal(mm_camera_app_t *my_cam_app)
+{
+    memset(&my_cam_app->hal_lib, 0, sizeof(hal_interface_lib_t));
+    my_cam_app->hal_lib.ptr = dlopen("libmmcamera_interface.so", RTLD_NOW);
+    my_cam_app->hal_lib.ptr_jpeg = dlopen("libmmjpeg_interface.so", RTLD_NOW);
+    if (!my_cam_app->hal_lib.ptr || !my_cam_app->hal_lib.ptr_jpeg) {
+        CDBG_ERROR("%s Error opening HAL library %s\n", __func__, dlerror());
+        return -MM_CAMERA_E_GENERAL;
+    }
+    *(void **)&(my_cam_app->hal_lib.get_num_of_cameras) =
+        dlsym(my_cam_app->hal_lib.ptr, "get_num_of_cameras");
+    *(void **)&(my_cam_app->hal_lib.mm_camera_open) =
+        dlsym(my_cam_app->hal_lib.ptr, "camera_open");
+    *(void **)&(my_cam_app->hal_lib.jpeg_open) =
+        dlsym(my_cam_app->hal_lib.ptr_jpeg, "jpeg_open");
+
+    if (my_cam_app->hal_lib.get_num_of_cameras == NULL ||
+        my_cam_app->hal_lib.mm_camera_open == NULL ||
+        my_cam_app->hal_lib.jpeg_open == NULL) {
+        CDBG_ERROR("%s Error loading HAL sym %s\n", __func__, dlerror());
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    my_cam_app->num_cameras = my_cam_app->hal_lib.get_num_of_cameras();
+    CDBG("%s: num_cameras = %d\n", __func__, my_cam_app->num_cameras);
+
+    return MM_CAMERA_OK;
+}
+
+int mm_app_allocate_ion_memory(mm_camera_app_buf_t *buf, unsigned int ion_type)
+{
+    int rc = MM_CAMERA_OK;
+    struct ion_handle_data handle_data;
+    struct ion_allocation_data alloc;
+    struct ion_fd_data ion_info_fd;
+    int main_ion_fd = -1;
+    void *data = NULL;
+
+    main_ion_fd = open("/dev/ion", O_RDONLY);
+    if (main_ion_fd <= 0) {
+        CDBG_ERROR("Ion dev open failed %s\n", strerror(errno));
+        goto ION_OPEN_FAILED;
+    }
+
+    memset(&alloc, 0, sizeof(alloc));
+    alloc.len = buf->mem_info.size;
+    /* to make it page size aligned */
+    alloc.len = (alloc.len + 4095U) & (~4095U);
+    alloc.align = 4096;
+    alloc.flags = ION_FLAG_CACHED;
+    alloc.heap_id_mask = ion_type;
+    rc = ioctl(main_ion_fd, ION_IOC_ALLOC, &alloc);
+    if (rc < 0) {
+        CDBG_ERROR("ION allocation failed\n");
+        goto ION_ALLOC_FAILED;
+    }
+
+    memset(&ion_info_fd, 0, sizeof(ion_info_fd));
+    ion_info_fd.handle = alloc.handle;
+    rc = ioctl(main_ion_fd, ION_IOC_SHARE, &ion_info_fd);
+    if (rc < 0) {
+        CDBG_ERROR("ION map failed %s\n", strerror(errno));
+        goto ION_MAP_FAILED;
+    }
+
+    data = mmap(NULL,
+                alloc.len,
+                PROT_READ  | PROT_WRITE,
+                MAP_SHARED,
+                ion_info_fd.fd,
+                0);
+
+    if (data == MAP_FAILED) {
+        CDBG_ERROR("ION_MMAP_FAILED: %s (%d)\n", strerror(errno), errno);
+        goto ION_MAP_FAILED;
+    }
+    buf->mem_info.main_ion_fd = main_ion_fd;
+    buf->mem_info.fd = ion_info_fd.fd;
+    buf->mem_info.handle = ion_info_fd.handle;
+    buf->mem_info.size = alloc.len;
+    buf->mem_info.data = data;
+    return MM_CAMERA_OK;
+
+ION_MAP_FAILED:
+    memset(&handle_data, 0, sizeof(handle_data));
+    handle_data.handle = ion_info_fd.handle;
+    ioctl(main_ion_fd, ION_IOC_FREE, &handle_data);
+ION_ALLOC_FAILED:
+    close(main_ion_fd);
+ION_OPEN_FAILED:
+    return -MM_CAMERA_E_GENERAL;
+}
+
+int mm_app_deallocate_ion_memory(mm_camera_app_buf_t *buf)
+{
+  struct ion_handle_data handle_data;
+  int rc = 0;
+
+  rc = munmap(buf->mem_info.data, buf->mem_info.size);
+
+  if (buf->mem_info.fd >= 0) {
+      close(buf->mem_info.fd);
+      buf->mem_info.fd = -1;
+  }
+
+  if (buf->mem_info.main_ion_fd >= 0) {
+      memset(&handle_data, 0, sizeof(handle_data));
+      handle_data.handle = buf->mem_info.handle;
+      ioctl(buf->mem_info.main_ion_fd, ION_IOC_FREE, &handle_data);
+      close(buf->mem_info.main_ion_fd);
+      buf->mem_info.main_ion_fd = -1;
+  }
+  return rc;
+}
+
+/* cmd = ION_IOC_CLEAN_CACHES, ION_IOC_INV_CACHES, ION_IOC_CLEAN_INV_CACHES */
+int mm_app_cache_ops(mm_camera_app_meminfo_t *mem_info,
+                     int cmd)
+{
+    struct ion_flush_data cache_inv_data;
+    struct ion_custom_data custom_data;
+    int ret = MM_CAMERA_OK;
+
+#ifdef USE_ION
+    if (NULL == mem_info) {
+        CDBG_ERROR("%s: mem_info is NULL, return here", __func__);
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    memset(&cache_inv_data, 0, sizeof(cache_inv_data));
+    memset(&custom_data, 0, sizeof(custom_data));
+    cache_inv_data.vaddr = mem_info->data;
+    cache_inv_data.fd = mem_info->fd;
+    cache_inv_data.handle = mem_info->handle;
+    cache_inv_data.length = (unsigned int)mem_info->size;
+    custom_data.cmd = (unsigned int)cmd;
+    custom_data.arg = (unsigned long)&cache_inv_data;
+
+    CDBG("addr = %p, fd = %d, handle = %lx length = %d, ION Fd = %d",
+         cache_inv_data.vaddr, cache_inv_data.fd,
+         (unsigned long)cache_inv_data.handle, cache_inv_data.length,
+         mem_info->main_ion_fd);
+    if(mem_info->main_ion_fd >= 0) {
+        if(ioctl(mem_info->main_ion_fd, ION_IOC_CUSTOM, &custom_data) < 0) {
+            ALOGE("%s: Cache Invalidate failed\n", __func__);
+            ret = -MM_CAMERA_E_GENERAL;
+        }
+    }
+#endif
+
+    return ret;
+}
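The cmd values named in the comment above mm_app_cache_ops() come from the MSM msm_ion.h custom ioctls. A typical use after the CPU writes into a mapped buffer would look like the following illustrative helper (not part of the patch):

    /* Flush and invalidate one app buffer so the camera sees current data. */
    static int flush_app_buf(mm_camera_app_buf_t *buf)
    {
        return mm_app_cache_ops(&buf->mem_info, ION_IOC_CLEAN_INV_CACHES);
    }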
+
+void mm_app_dump_frame(mm_camera_buf_def_t *frame,
+                       char *name,
+                       char *ext,
+                       uint32_t frame_idx)
+{
+    char file_name[FILENAME_MAX];
+    int file_fd;
+    int i;
+    int offset = 0;
+    if ( frame != NULL) {
+        snprintf(file_name, sizeof(file_name),
+                QCAMERA_DUMP_FRM_LOCATION"test/%s_%04d.%s", name, frame_idx, ext);
+        file_fd = open(file_name, O_RDWR | O_CREAT, 0777);
+        if (file_fd < 0) {
+            CDBG_ERROR("%s: cannot open file %s \n", __func__, file_name);
+        } else {
+            for (i = 0; i < frame->planes_buf.num_planes; i++) {
+                CDBG("%s: saving file from address: %p, data offset: %d, "
+                     "length: %d \n", __func__, frame->buffer,
+                    frame->planes_buf.planes[i].data_offset, frame->planes_buf.planes[i].length);
+                write(file_fd,
+                      (uint8_t *)frame->buffer + offset,
+                      frame->planes_buf.planes[i].length);
+                offset += (int)frame->planes_buf.planes[i].length;
+            }
+
+            close(file_fd);
+            CDBG("dump %s", file_name);
+        }
+    }
+}
+
+void mm_app_dump_jpeg_frame(const void * data, size_t size, char* name,
+        char* ext, uint32_t index)
+{
+    char buf[FILENAME_MAX];
+    int file_fd;
+    if ( data != NULL) {
+        snprintf(buf, sizeof(buf),
+                QCAMERA_DUMP_FRM_LOCATION"test/%s_%u.%s", name, index, ext);
+        CDBG("%s: %s size =%zu, jobId=%u", __func__, buf, size, index);
+        file_fd = open(buf, O_RDWR | O_CREAT, 0777);
+        write(file_fd, data, size);
+        close(file_fd);
+    }
+}
+
+int mm_app_alloc_bufs(mm_camera_app_buf_t* app_bufs,
+                      cam_frame_len_offset_t *frame_offset_info,
+                      uint8_t num_bufs,
+                      uint8_t is_streambuf,
+                      size_t multipleOf)
+{
+    uint32_t i, j;
+    unsigned int ion_type = 0x1 << CAMERA_ION_FALLBACK_HEAP_ID;
+
+    if (is_streambuf) {
+        ion_type |= 0x1 << CAMERA_ION_HEAP_ID;
+    }
+
+    for (i = 0; i < num_bufs ; i++) {
+        if ( 0 < multipleOf ) {
+            size_t m = frame_offset_info->frame_len / multipleOf;
+            if ( ( frame_offset_info->frame_len % multipleOf ) != 0 ) {
+                m++;
+            }
+            app_bufs[i].mem_info.size = m * multipleOf;
+        } else {
+            app_bufs[i].mem_info.size = frame_offset_info->frame_len;
+        }
+        mm_app_allocate_ion_memory(&app_bufs[i], ion_type);
+
+        app_bufs[i].buf.buf_idx = i;
+        app_bufs[i].buf.planes_buf.num_planes = (int8_t)frame_offset_info->num_planes;
+        app_bufs[i].buf.fd = app_bufs[i].mem_info.fd;
+        app_bufs[i].buf.frame_len = app_bufs[i].mem_info.size;
+        app_bufs[i].buf.buffer = app_bufs[i].mem_info.data;
+        app_bufs[i].buf.mem_info = (void *)&app_bufs[i].mem_info;
+
+        /* Plane 0 needs to be set separately. Set other planes
+         * in a loop. */
+        app_bufs[i].buf.planes_buf.planes[0].length = frame_offset_info->mp[0].len;
+        app_bufs[i].buf.planes_buf.planes[0].m.userptr =
+            (long unsigned int)app_bufs[i].buf.fd;
+        app_bufs[i].buf.planes_buf.planes[0].data_offset = frame_offset_info->mp[0].offset;
+        app_bufs[i].buf.planes_buf.planes[0].reserved[0] = 0;
+        for (j = 1; j < (uint8_t)frame_offset_info->num_planes; j++) {
+            app_bufs[i].buf.planes_buf.planes[j].length = frame_offset_info->mp[j].len;
+            app_bufs[i].buf.planes_buf.planes[j].m.userptr =
+                (long unsigned int)app_bufs[i].buf.fd;
+            app_bufs[i].buf.planes_buf.planes[j].data_offset = frame_offset_info->mp[j].offset;
+            app_bufs[i].buf.planes_buf.planes[j].reserved[0] =
+                app_bufs[i].buf.planes_buf.planes[j-1].reserved[0] +
+                app_bufs[i].buf.planes_buf.planes[j-1].length;
+        }
+    }
+    CDBG("%s: X", __func__);
+    return MM_CAMERA_OK;
+}
+
+int mm_app_release_bufs(uint8_t num_bufs,
+                        mm_camera_app_buf_t* app_bufs)
+{
+    int i, rc = MM_CAMERA_OK;
+
+    CDBG("%s: E", __func__);
+
+    for (i = 0; i < num_bufs; i++) {
+        rc = mm_app_deallocate_ion_memory(&app_bufs[i]);
+    }
+    memset(app_bufs, 0, num_bufs * sizeof(mm_camera_app_buf_t));
+    CDBG("%s: X", __func__);
+    return rc;
+}
+
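+/* Stream buffer allocation callback invoked through the mm-camera
+ * interface: allocates the stream buffers, maps each one via
+ * ops_tbl->map_ops, and returns the buffer definitions together with
+ * their initial registration flags. */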
+int mm_app_stream_initbuf(cam_frame_len_offset_t *frame_offset_info,
+                          uint8_t *num_bufs,
+                          uint8_t **initial_reg_flag,
+                          mm_camera_buf_def_t **bufs,
+                          mm_camera_map_unmap_ops_tbl_t *ops_tbl,
+                          void *user_data)
+{
+    mm_camera_stream_t *stream = (mm_camera_stream_t *)user_data;
+    mm_camera_buf_def_t *pBufs = NULL;
+    uint8_t *reg_flags = NULL;
+    int i, rc;
+
+    stream->offset = *frame_offset_info;
+
+    CDBG("%s: alloc buf for stream_id %d, len=%d, num planes: %d, offset: %d",
+         __func__,
+         stream->s_id,
+         frame_offset_info->frame_len,
+         frame_offset_info->num_planes,
+         frame_offset_info->mp[1].offset);
+
+    if (stream->num_of_bufs > CAM_MAX_NUM_BUFS_PER_STREAM)
+        stream->num_of_bufs = CAM_MAX_NUM_BUFS_PER_STREAM;
+
+    pBufs = (mm_camera_buf_def_t *)malloc(sizeof(mm_camera_buf_def_t) * stream->num_of_bufs);
+    reg_flags = (uint8_t *)malloc(sizeof(uint8_t) * stream->num_of_bufs);
+    if (pBufs == NULL || reg_flags == NULL) {
+        CDBG_ERROR("%s: No mem for bufs", __func__);
+        if (pBufs != NULL) {
+            free(pBufs);
+        }
+        if (reg_flags != NULL) {
+            free(reg_flags);
+        }
+        return -1;
+    }
+
+    rc = mm_app_alloc_bufs(&stream->s_bufs[0],
+                           frame_offset_info,
+                           stream->num_of_bufs,
+                           1,
+                           stream->multipleOf);
+
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: mm_stream_alloc_bufs err = %d", __func__, rc);
+        free(pBufs);
+        free(reg_flags);
+        return rc;
+    }
+
+    for (i = 0; i < stream->num_of_bufs; i++) {
+        /* mapping stream bufs first */
+        pBufs[i] = stream->s_bufs[i].buf;
+        reg_flags[i] = 1;
+        rc = ops_tbl->map_ops(pBufs[i].buf_idx,
+                              -1,
+                              pBufs[i].fd,
+                              (uint32_t)pBufs[i].frame_len,
+                              CAM_MAPPING_BUF_TYPE_STREAM_BUF, ops_tbl->userdata);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s: mapping buf[%d] err = %d", __func__, i, rc);
+            break;
+        }
+    }
+
+    if (rc != MM_CAMERA_OK) {
+        int j;
+        for (j = 0; j < i; j++) {
+            ops_tbl->unmap_ops(pBufs[j].buf_idx, -1,
+                    CAM_MAPPING_BUF_TYPE_STREAM_BUF, ops_tbl->userdata);
+        }
+        mm_app_release_bufs(stream->num_of_bufs, &stream->s_bufs[0]);
+        free(pBufs);
+        free(reg_flags);
+        return rc;
+    }
+
+    *num_bufs = stream->num_of_bufs;
+    *bufs = pBufs;
+    *initial_reg_flag = reg_flags;
+
+    CDBG("%s: X",__func__);
+    return rc;
+}
+
+int32_t mm_app_stream_deinitbuf(mm_camera_map_unmap_ops_tbl_t *ops_tbl,
+                                void *user_data)
+{
+    mm_camera_stream_t *stream = (mm_camera_stream_t *)user_data;
+    int i;
+
+    for (i = 0; i < stream->num_of_bufs ; i++) {
+        /* mapping stream bufs first */
+        ops_tbl->unmap_ops(stream->s_bufs[i].buf.buf_idx, -1,
+                CAM_MAPPING_BUF_TYPE_STREAM_BUF, ops_tbl->userdata);
+    }
+
+    mm_app_release_bufs(stream->num_of_bufs, &stream->s_bufs[0]);
+
+    CDBG("%s: X",__func__);
+    return 0;
+}
+
+int32_t mm_app_stream_clean_invalidate_buf(uint32_t index, void *user_data)
+{
+    mm_camera_stream_t *stream = (mm_camera_stream_t *)user_data;
+    return mm_app_cache_ops(&stream->s_bufs[index].mem_info,
+      ION_IOC_CLEAN_INV_CACHES);
+}
+
+int32_t mm_app_stream_invalidate_buf(uint32_t index, void *user_data)
+{
+    mm_camera_stream_t *stream = (mm_camera_stream_t *)user_data;
+    return mm_app_cache_ops(&stream->s_bufs[index].mem_info, ION_IOC_INV_CACHES);
+}
+
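+/* Camera event callback registered in mm_app_open(); the test app only
+ * logs auto-focus-done and zoom-done events. */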
+static void notify_evt_cb(uint32_t camera_handle,
+                          mm_camera_event_t *evt,
+                          void *user_data)
+{
+    mm_camera_test_obj_t *test_obj =
+        (mm_camera_test_obj_t *)user_data;
+    if (test_obj == NULL || test_obj->cam->camera_handle != camera_handle) {
+        CDBG_ERROR("%s: Not a valid test obj", __func__);
+        return;
+    }
+
+    CDBG("%s:E evt = %d", __func__, evt->server_event_type);
+    switch (evt->server_event_type) {
+       case CAM_EVENT_TYPE_AUTO_FOCUS_DONE:
+           CDBG("%s: rcvd auto focus done evt", __func__);
+           break;
+       case CAM_EVENT_TYPE_ZOOM_DONE:
+           CDBG("%s: rcvd zoom done evt", __func__);
+           break;
+       default:
+           break;
+    }
+
+    CDBG("%s:X", __func__);
+}
+
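+/* Open the camera through the HAL, allocate and map the capability and
+ * parameter buffers, register the event callback, query capabilities and
+ * open the JPEG encoder. On failure, the error_* labels unwind whatever
+ * was set up so far. */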
+int mm_app_open(mm_camera_app_t *cam_app,
+                int cam_id,
+                mm_camera_test_obj_t *test_obj)
+{
+    int32_t rc = 0;
+    cam_frame_len_offset_t offset_info;
+
+    CDBG("%s:BEGIN\n", __func__);
+
+    rc = cam_app->hal_lib.mm_camera_open((uint8_t)cam_id, &(test_obj->cam));
+    if(rc) {
+        CDBG_ERROR("%s:dev open error. rc = %d, vtbl = %p\n", __func__, rc, test_obj->cam);
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    CDBG("Open Camera id = %d handle = %d", cam_id, test_obj->cam->camera_handle);
+
+    /* alloc ion mem for capability buf */
+    memset(&offset_info, 0, sizeof(offset_info));
+    offset_info.frame_len = sizeof(cam_capability_t);
+
+    rc = mm_app_alloc_bufs(&test_obj->cap_buf,
+                           &offset_info,
+                           1,
+                           0,
+                           0);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s:alloc buf for capability error\n", __func__);
+        goto error_after_cam_open;
+    }
+
+    /* mapping capability buf */
+    rc = test_obj->cam->ops->map_buf(test_obj->cam->camera_handle,
+                                     CAM_MAPPING_BUF_TYPE_CAPABILITY,
+                                     test_obj->cap_buf.mem_info.fd,
+                                     test_obj->cap_buf.mem_info.size);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s:map for capability error\n", __func__);
+        goto error_after_cap_buf_alloc;
+    }
+
+    /* alloc ion mem for getparm buf */
+    memset(&offset_info, 0, sizeof(offset_info));
+    offset_info.frame_len = sizeof(parm_buffer_t);
+    rc = mm_app_alloc_bufs(&test_obj->parm_buf,
+                           &offset_info,
+                           1,
+                           0,
+                           0);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s:alloc buf for getparm_buf error\n", __func__);
+        goto error_after_cap_buf_map;
+    }
+
+    /* mapping getparm buf */
+    rc = test_obj->cam->ops->map_buf(test_obj->cam->camera_handle,
+                                     CAM_MAPPING_BUF_TYPE_PARM_BUF,
+                                     test_obj->parm_buf.mem_info.fd,
+                                     test_obj->parm_buf.mem_info.size);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s:map getparm_buf error\n", __func__);
+        goto error_after_getparm_buf_alloc;
+    }
+    test_obj->params_buffer = (parm_buffer_t*) test_obj->parm_buf.mem_info.data;
+    CDBG_HIGH("\n%s params_buffer=%p\n",__func__,test_obj->params_buffer);
+
+    rc = test_obj->cam->ops->register_event_notify(test_obj->cam->camera_handle,
+                                                   notify_evt_cb,
+                                                   test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: failed register_event_notify", __func__);
+        rc = -MM_CAMERA_E_GENERAL;
+        goto error_after_getparm_buf_map;
+    }
+
+    rc = test_obj->cam->ops->query_capability(test_obj->cam->camera_handle);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: failed query_capability", __func__);
+        rc = -MM_CAMERA_E_GENERAL;
+        goto error_after_getparm_buf_map;
+    }
+    memset(&test_obj->jpeg_ops, 0, sizeof(mm_jpeg_ops_t));
+    mm_dimension pic_size;
+    memset(&pic_size, 0, sizeof(mm_dimension));
+    pic_size.w = 4000;
+    pic_size.h = 3000;
+    test_obj->jpeg_hdl = cam_app->hal_lib.jpeg_open(&test_obj->jpeg_ops,pic_size);
+    if (test_obj->jpeg_hdl == 0) {
+        CDBG_ERROR("%s: jpeg lib open err", __func__);
+        rc = -MM_CAMERA_E_GENERAL;
+        goto error_after_getparm_buf_map;
+    }
+
+    return rc;
+
+error_after_getparm_buf_map:
+    test_obj->cam->ops->unmap_buf(test_obj->cam->camera_handle,
+                                  CAM_MAPPING_BUF_TYPE_PARM_BUF);
+error_after_getparm_buf_alloc:
+    mm_app_release_bufs(1, &test_obj->parm_buf);
+error_after_cap_buf_map:
+    test_obj->cam->ops->unmap_buf(test_obj->cam->camera_handle,
+                                  CAM_MAPPING_BUF_TYPE_CAPABILITY);
+error_after_cap_buf_alloc:
+    mm_app_release_bufs(1, &test_obj->cap_buf);
+error_after_cam_open:
+    test_obj->cam->ops->close_camera(test_obj->cam->camera_handle);
+    test_obj->cam = NULL;
+    return rc;
+}
+
+int init_batch_update(parm_buffer_t *p_table)
+{
+    int rc = MM_CAMERA_OK;
+    CDBG_HIGH("\nEnter %s\n",__func__);
+    int32_t hal_version = CAM_HAL_V1;
+
+    memset(p_table, 0, sizeof(parm_buffer_t));
+    if(ADD_SET_PARAM_ENTRY_TO_BATCH(p_table, CAM_INTF_PARM_HAL_VERSION, hal_version)) {
+        rc = -1;
+    }
+
+    return rc;
+}
+
+int commit_set_batch(mm_camera_test_obj_t *test_obj)
+{
+    int rc = MM_CAMERA_OK;
+    int i = 0;
+
+    for(i = 0; i < CAM_INTF_PARM_MAX; i++){
+        if(test_obj->params_buffer->is_valid[i])
+            break;
+    }
+    if (i < CAM_INTF_PARM_MAX) {
+        CDBG_HIGH("\n set_param p_buffer =%p\n",test_obj->params_buffer);
+        rc = test_obj->cam->ops->set_parms(test_obj->cam->camera_handle, test_obj->params_buffer);
+    }
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: cam->ops->set_parms failed !!", __func__);
+    }
+    return rc;
+}
+
+int mm_app_close(mm_camera_test_obj_t *test_obj)
+{
+    int32_t rc = MM_CAMERA_OK;
+
+    if (test_obj == NULL || test_obj->cam == NULL) {
+        CDBG_ERROR("%s: cam not opened", __func__);
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    /* unmap capability buf */
+    rc = test_obj->cam->ops->unmap_buf(test_obj->cam->camera_handle,
+                                       CAM_MAPPING_BUF_TYPE_CAPABILITY);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: unmap capability buf failed, rc=%d", __func__, rc);
+    }
+
+    /* unmap parm buf */
+    rc = test_obj->cam->ops->unmap_buf(test_obj->cam->camera_handle,
+                                       CAM_MAPPING_BUF_TYPE_PARM_BUF);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: unmap setparm buf failed, rc=%d", __func__, rc);
+    }
+
+    rc = test_obj->cam->ops->close_camera(test_obj->cam->camera_handle);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: close camera failed, rc=%d", __func__, rc);
+    }
+    test_obj->cam = NULL;
+
+    /* close jpeg client */
+    if (test_obj->jpeg_hdl && test_obj->jpeg_ops.close) {
+        rc = test_obj->jpeg_ops.close(test_obj->jpeg_hdl);
+        test_obj->jpeg_hdl = 0;
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s: close jpeg failed, rc=%d", __func__, rc);
+        }
+    }
+
+    /* dealloc capability buf */
+    rc = mm_app_release_bufs(1, &test_obj->cap_buf);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: release capability buf failed, rc=%d", __func__, rc);
+    }
+
+    /* dealloc parm buf */
+    rc = mm_app_release_bufs(1, &test_obj->parm_buf);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: release setparm buf failed, rc=%d", __func__, rc);
+    }
+
+    return MM_CAMERA_OK;
+}
+
+mm_camera_channel_t * mm_app_add_channel(mm_camera_test_obj_t *test_obj,
+                                         mm_camera_channel_type_t ch_type,
+                                         mm_camera_channel_attr_t *attr,
+                                         mm_camera_buf_notify_t channel_cb,
+                                         void *userdata)
+{
+    uint32_t ch_id = 0;
+    mm_camera_channel_t *channel = NULL;
+
+    ch_id = test_obj->cam->ops->add_channel(test_obj->cam->camera_handle,
+                                            attr,
+                                            channel_cb,
+                                            userdata);
+    if (ch_id == 0) {
+        CDBG_ERROR("%s: add channel failed", __func__);
+        return NULL;
+    }
+    channel = &test_obj->channels[ch_type];
+    channel->ch_id = ch_id;
+    return channel;
+}
+
+int mm_app_del_channel(mm_camera_test_obj_t *test_obj,
+                       mm_camera_channel_t *channel)
+{
+    test_obj->cam->ops->delete_channel(test_obj->cam->camera_handle,
+                                       channel->ch_id);
+    memset(channel, 0, sizeof(mm_camera_channel_t));
+    return MM_CAMERA_OK;
+}
+
+mm_camera_stream_t * mm_app_add_stream(mm_camera_test_obj_t *test_obj,
+                                       mm_camera_channel_t *channel)
+{
+    mm_camera_stream_t *stream = NULL;
+    int rc = MM_CAMERA_OK;
+    cam_frame_len_offset_t offset_info;
+
+    stream = &(channel->streams[channel->num_streams++]);
+    stream->s_id = test_obj->cam->ops->add_stream(test_obj->cam->camera_handle,
+                                                  channel->ch_id);
+    if (stream->s_id == 0) {
+        CDBG_ERROR("%s: add stream failed", __func__);
+        return NULL;
+    }
+
+    stream->multipleOf = test_obj->slice_size;
+
+    /* alloc ion mem for stream_info buf */
+    memset(&offset_info, 0, sizeof(offset_info));
+    offset_info.frame_len = sizeof(cam_stream_info_t);
+
+    rc = mm_app_alloc_bufs(&stream->s_info_buf,
+                           &offset_info,
+                           1,
+                           0,
+                           0);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s:alloc buf for stream_info error\n", __func__);
+        test_obj->cam->ops->delete_stream(test_obj->cam->camera_handle,
+                                          channel->ch_id,
+                                          stream->s_id);
+        stream->s_id = 0;
+        return NULL;
+    }
+
+    /* mapping streaminfo buf */
+    rc = test_obj->cam->ops->map_stream_buf(test_obj->cam->camera_handle,
+                                            channel->ch_id,
+                                            stream->s_id,
+                                            CAM_MAPPING_BUF_TYPE_STREAM_INFO,
+                                            0,
+                                            -1,
+                                            stream->s_info_buf.mem_info.fd,
+                                            (uint32_t)stream->s_info_buf.mem_info.size);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s:map setparm_buf error\n", __func__);
+        mm_app_deallocate_ion_memory(&stream->s_info_buf);
+        test_obj->cam->ops->delete_stream(test_obj->cam->camera_handle,
+                                          channel->ch_id,
+                                          stream->s_id);
+        stream->s_id = 0;
+        return NULL;
+    }
+
+    return stream;
+}
+
+int mm_app_del_stream(mm_camera_test_obj_t *test_obj,
+                      mm_camera_channel_t *channel,
+                      mm_camera_stream_t *stream)
+{
+    test_obj->cam->ops->unmap_stream_buf(test_obj->cam->camera_handle,
+                                         channel->ch_id,
+                                         stream->s_id,
+                                         CAM_MAPPING_BUF_TYPE_STREAM_INFO,
+                                         0,
+                                         -1);
+    mm_app_deallocate_ion_memory(&stream->s_info_buf);
+    test_obj->cam->ops->delete_stream(test_obj->cam->camera_handle,
+                                      channel->ch_id,
+                                      stream->s_id);
+    memset(stream, 0, sizeof(mm_camera_stream_t));
+    return MM_CAMERA_OK;
+}
+
+mm_camera_channel_t *mm_app_get_channel_by_type(mm_camera_test_obj_t *test_obj,
+                                                mm_camera_channel_type_t ch_type)
+{
+    return &test_obj->channels[ch_type];
+}
+
+int mm_app_config_stream(mm_camera_test_obj_t *test_obj,
+                         mm_camera_channel_t *channel,
+                         mm_camera_stream_t *stream,
+                         mm_camera_stream_config_t *config)
+{
+    return test_obj->cam->ops->config_stream(test_obj->cam->camera_handle,
+                                             channel->ch_id,
+                                             stream->s_id,
+                                             config);
+}
+
+int mm_app_start_channel(mm_camera_test_obj_t *test_obj,
+                         mm_camera_channel_t *channel)
+{
+    return test_obj->cam->ops->start_channel(test_obj->cam->camera_handle,
+                                             channel->ch_id);
+}
+
+int mm_app_stop_channel(mm_camera_test_obj_t *test_obj,
+                        mm_camera_channel_t *channel)
+{
+    return test_obj->cam->ops->stop_channel(test_obj->cam->camera_handle,
+                                            channel->ch_id);
+}
+
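+/* Batch parameter helpers: initBatchUpdate() resets the shared parm
+ * buffer and stamps it with the HAL version, while commitSetBatch() /
+ * commitGetBatch() push or pull it only when at least one entry is
+ * marked valid. */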
+int initBatchUpdate(mm_camera_test_obj_t *test_obj)
+{
+    int32_t hal_version = CAM_HAL_V1;
+
+    parm_buffer_t *parm_buf = ( parm_buffer_t * ) test_obj->parm_buf.mem_info.data;
+    memset(parm_buf, 0, sizeof(parm_buffer_t));
+    ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_HAL_VERSION, hal_version);
+
+    return MM_CAMERA_OK;
+}
+
+int commitSetBatch(mm_camera_test_obj_t *test_obj)
+{
+    int rc = MM_CAMERA_OK;
+    int i = 0;
+
+    parm_buffer_t *p_table = ( parm_buffer_t * ) test_obj->parm_buf.mem_info.data;
+    for(i = 0; i < CAM_INTF_PARM_MAX; i++){
+        if(p_table->is_valid[i])
+            break;
+    }
+    if (i < CAM_INTF_PARM_MAX) {
+        rc = test_obj->cam->ops->set_parms(test_obj->cam->camera_handle, p_table);
+    }
+    return rc;
+}
+
+
+int commitGetBatch(mm_camera_test_obj_t *test_obj)
+{
+    int rc = MM_CAMERA_OK;
+    int i = 0;
+    parm_buffer_t *p_table = ( parm_buffer_t * ) test_obj->parm_buf.mem_info.data;
+    for(i = 0; i < CAM_INTF_PARM_MAX; i++){
+        if(p_table->is_valid[i])
+            break;
+    }
+    if (i < CAM_INTF_PARM_MAX) {
+        rc = test_obj->cam->ops->get_parms(test_obj->cam->camera_handle, p_table);
+    }
+    return rc;
+}
+
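+/* The parameter setter/getter helpers below all follow the same pattern:
+ * initBatchUpdate(), ADD_SET_PARAM_ENTRY_TO_BATCH() for the parameter,
+ * then commitSetBatch() or commitGetBatch(). */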
+int setAecLock(mm_camera_test_obj_t *test_obj, int value)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_AEC_LOCK, (uint32_t)value)) {
+        CDBG_ERROR("%s: AEC Lock parameter not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+ERROR:
+    return rc;
+}
+
+int setAwbLock(mm_camera_test_obj_t *test_obj, int value)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_AWB_LOCK, (uint32_t)value)) {
+        CDBG_ERROR("%s: AWB Lock parameter not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+ERROR:
+    return rc;
+}
+
+
+int set3Acommand(mm_camera_test_obj_t *test_obj, cam_eztune_cmd_data_t *value)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_EZTUNE_CMD, *value)) {
+        CDBG_ERROR("%s: CAM_INTF_PARM_EZTUNE_CMD parameter not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+ERROR:
+    return rc;
+}
+
+int getChromatix(mm_camera_test_obj_t *test_obj, tune_chromatix_t *value)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_GET_CHROMATIX, *value)) {
+        CDBG_ERROR("%s: getChromatixPointer not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitGetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+    READ_PARAM_ENTRY(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_GET_CHROMATIX, *value);
+
+ERROR:
+    return rc;
+}
+
+int setReloadChromatix(mm_camera_test_obj_t *test_obj, tune_chromatix_t *value)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_SET_RELOAD_CHROMATIX, *value)) {
+        CDBG_ERROR("%s: getChromatixPointer not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+ERROR:
+    return rc;
+}
+
+int getAutofocusParams(mm_camera_test_obj_t *test_obj, tune_autofocus_t *value)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_GET_AFTUNE, *value)) {
+        CDBG_ERROR("%s: getChromatixPointer not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitGetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+    READ_PARAM_ENTRY(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_GET_AFTUNE, *value);
+
+ERROR:
+    return rc;
+}
+
+int setReloadAutofocusParams(mm_camera_test_obj_t *test_obj, tune_autofocus_t *value)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_SET_RELOAD_AFTUNE, *value)) {
+        CDBG_ERROR("%s: setReloadAutofocusParams not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+ERROR:
+    return rc;
+}
+
+int setAutoFocusTuning(mm_camera_test_obj_t *test_obj, tune_actuator_t *value)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_SET_AUTOFOCUSTUNING, *value)) {
+        CDBG_ERROR("%s: AutoFocus Tuning not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+ERROR:
+    return rc;
+}
+
+int setVfeCommand(mm_camera_test_obj_t *test_obj, tune_cmd_t *value)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_SET_VFE_COMMAND, *value)) {
+        CDBG_ERROR("%s: VFE Command not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+ERROR:
+    return rc;
+}
+
+int setPPCommand(mm_camera_test_obj_t *test_obj, tune_cmd_t *value)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_SET_PP_COMMAND, *value)) {
+        CDBG_ERROR("%s: PP Command not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+ERROR:
+    return rc;
+}
+
+int setFocusMode(mm_camera_test_obj_t *test_obj, cam_focus_mode_type mode)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    uint32_t value = mode;
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_FOCUS_MODE, value)) {
+        CDBG_ERROR("%s: Focus mode parameter not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+ERROR:
+    return rc;
+}
+
+int setEVCompensation(mm_camera_test_obj_t *test_obj, int ev)
+{
+    int rc = MM_CAMERA_OK;
+
+    cam_capability_t *camera_cap = NULL;
+
+    camera_cap = (cam_capability_t *) test_obj->cap_buf.mem_info.data;
+    if ( (ev >= camera_cap->exposure_compensation_min) &&
+         (ev <= camera_cap->exposure_compensation_max) ) {
+
+        rc = initBatchUpdate(test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+            goto ERROR;
+        }
+
+        if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+                CAM_INTF_PARM_EXPOSURE_COMPENSATION, ev)) {
+            CDBG_ERROR("%s: EV compensation parameter not added to batch\n", __func__);
+            rc = -1;
+            goto ERROR;
+        }
+
+        rc = commitSetBatch(test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+            goto ERROR;
+        }
+
+        CDBG_ERROR("%s: EV compensation set to: %d", __func__, ev);
+    } else {
+        CDBG_ERROR("%s: Invalid EV compensation", __func__);
+        return -EINVAL;
+    }
+
+ERROR:
+    return rc;
+}
+
+int setAntibanding(mm_camera_test_obj_t *test_obj, cam_antibanding_mode_type antibanding)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_ANTIBANDING, antibanding)) {
+        CDBG_ERROR("%s: Antibanding parameter not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+    CDBG_ERROR("%s: Antibanding set to: %d", __func__, (int)antibanding);
+
+ERROR:
+    return rc;
+}
+
+int setWhiteBalance(mm_camera_test_obj_t *test_obj, cam_wb_mode_type mode)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_WHITE_BALANCE, mode)) {
+        CDBG_ERROR("%s: White balance parameter not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+    CDBG_ERROR("%s: White balance set to: %d", __func__, (int)mode);
+
+ERROR:
+    return rc;
+}
+
+int setExposureMetering(mm_camera_test_obj_t *test_obj, cam_auto_exposure_mode_type mode)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_EXPOSURE, mode)) {
+        CDBG_ERROR("%s: Exposure metering parameter not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+    CDBG_ERROR("%s: Exposure metering set to: %d", __func__, (int)mode);
+
+ERROR:
+    return rc;
+}
+
+int setBrightness(mm_camera_test_obj_t *test_obj, int brightness)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_BRIGHTNESS, brightness)) {
+        CDBG_ERROR("%s: Brightness parameter not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+    CDBG_ERROR("%s: Brightness set to: %d", __func__, brightness);
+
+ERROR:
+    return rc;
+}
+
+int setContrast(mm_camera_test_obj_t *test_obj, int contrast)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_CONTRAST, contrast)) {
+        CDBG_ERROR("%s: Contrast parameter not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+    CDBG_ERROR("%s: Contrast set to: %d", __func__, contrast);
+
+ERROR:
+    return rc;
+}
+
+int setTintless(mm_camera_test_obj_t *test_obj, int tintless)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_TINTLESS, tintless)) {
+        CDBG_ERROR("%s: Tintless parameter not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+    CDBG_ERROR("%s:  set Tintless to: %d", __func__, tintless);
+
+ERROR:
+    return rc;
+}
+
+int setSaturation(mm_camera_test_obj_t *test_obj, int saturation)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_SATURATION, saturation)) {
+        CDBG_ERROR("%s: Saturation parameter not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+    CDBG_ERROR("%s: Saturation set to: %d", __func__, saturation);
+
+ERROR:
+    return rc;
+}
+
+int setSharpness(mm_camera_test_obj_t *test_obj, int sharpness)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_SHARPNESS, sharpness)) {
+        CDBG_ERROR("%s: Sharpness parameter not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+    test_obj->reproc_sharpness = sharpness;
+    CDBG_ERROR("%s: Sharpness set to: %d", __func__, sharpness);
+
+ERROR:
+    return rc;
+}
+
+int setISO(mm_camera_test_obj_t *test_obj, cam_iso_mode_type iso)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_ISO, iso)) {
+        CDBG_ERROR("%s: ISO parameter not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+    CDBG_ERROR("%s: ISO set to: %d", __func__, (int)iso);
+
+ERROR:
+    return rc;
+}
+
+int setZoom(mm_camera_test_obj_t *test_obj, int zoom)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_ZOOM, zoom)) {
+        CDBG_ERROR("%s: Zoom parameter not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+    CDBG_ERROR("%s: Zoom set to: %d", __func__, zoom);
+
+ERROR:
+    return rc;
+}
+
+int setFPSRange(mm_camera_test_obj_t *test_obj, cam_fps_range_t range)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_FPS_RANGE, range)) {
+        CDBG_ERROR("%s: FPS range parameter not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+    CDBG_ERROR("%s: FPS Range set to: [%5.2f:%5.2f]",
+                __func__,
+                range.min_fps,
+                range.max_fps);
+
+ERROR:
+    return rc;
+}
+
+int setScene(mm_camera_test_obj_t *test_obj, cam_scene_mode_type scene)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_BESTSHOT_MODE, scene)) {
+        CDBG_ERROR("%s: Scene parameter not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+    CDBG_ERROR("%s: Scene set to: %d", __func__, (int)scene);
+
+ERROR:
+    return rc;
+}
+
+int setFlash(mm_camera_test_obj_t *test_obj, cam_flash_mode_t flash)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_LED_MODE, flash)) {
+        CDBG_ERROR("%s: Flash parameter not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+    CDBG_ERROR("%s: Flash set to: %d", __func__, (int)flash);
+
+ERROR:
+    return rc;
+}
+
+int setWNR(mm_camera_test_obj_t *test_obj, uint8_t enable)
+{
+    int rc = MM_CAMERA_OK;
+
+    rc = initBatchUpdate(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch camera parameter update failed\n", __func__);
+        goto ERROR;
+    }
+
+    cam_denoise_param_t param;
+    memset(&param, 0, sizeof(cam_denoise_param_t));
+    param.denoise_enable = enable;
+    param.process_plates = CAM_WAVELET_DENOISE_YCBCR_PLANE;
+
+    if (ADD_SET_PARAM_ENTRY_TO_BATCH(test_obj->parm_buf.mem_info.data,
+            CAM_INTF_PARM_WAVELET_DENOISE, param)) {
+        CDBG_ERROR("%s: WNR enabled parameter not added to batch\n", __func__);
+        rc = -1;
+        goto ERROR;
+    }
+
+    rc = commitSetBatch(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: Batch parameters commit failed\n", __func__);
+        goto ERROR;
+    }
+
+
+    test_obj->reproc_wnr = param;
+    CDBG_ERROR("%s: WNR enabled: %d", __func__, enable);
+
+ERROR:
+    return rc;
+}
+
+
+/** tuneserver_capture
+ *    @lib_handle: the camera handle object
+ *    @dim: snapshot dimensions
+ *
+ *  makes JPEG capture
+ *
+ *  Return: >=0 on success, -1 on failure.
+ **/
+int tuneserver_capture(mm_camera_lib_handle *lib_handle,
+                       mm_camera_lib_snapshot_params *dim)
+{
+    int rc = 0;
+
+    printf("Take jpeg snapshot\n");
+    if ( lib_handle->stream_running ) {
+
+        if ( lib_handle->test_obj.zsl_enabled) {
+            if ( NULL != dim) {
+                if ( ( lib_handle->test_obj.buffer_width != dim->width) ||
+                     ( lib_handle->test_obj.buffer_height != dim->height ) ) {
+
+                    lib_handle->test_obj.buffer_width = dim->width;
+                    lib_handle->test_obj.buffer_height = dim->height;
+
+                    rc = mm_camera_lib_stop_stream(lib_handle);
+                    if (rc != MM_CAMERA_OK) {
+                        CDBG_ERROR("%s: mm_camera_lib_stop_stream() err=%d\n",
+                                   __func__, rc);
+                        goto EXIT;
+                    }
+
+                    rc = mm_camera_lib_start_stream(lib_handle);
+                    if (rc != MM_CAMERA_OK) {
+                        CDBG_ERROR("%s: mm_camera_lib_start_stream() err=%d\n",
+                                   __func__, rc);
+                        goto EXIT;
+                    }
+                }
+
+            }
+
+            lib_handle->test_obj.encodeJpeg = 1;
+
+            mm_camera_app_wait();
+        } else {
+            // For standard 2D capture streaming has to be disabled first
+            rc = mm_camera_lib_stop_stream(lib_handle);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_camera_lib_stop_stream() err=%d\n",
+                         __func__, rc);
+                goto EXIT;
+            }
+
+            if ( NULL != dim ) {
+                lib_handle->test_obj.buffer_width = dim->width;
+                lib_handle->test_obj.buffer_height = dim->height;
+            }
+            rc = mm_app_start_capture(&lib_handle->test_obj, 1);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_app_start_capture() err=%d\n",
+                         __func__, rc);
+                goto EXIT;
+            }
+
+            mm_camera_app_wait();
+
+            rc = mm_app_stop_capture(&lib_handle->test_obj);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_app_stop_capture() err=%d\n",
+                         __func__, rc);
+                goto EXIT;
+            }
+
+            // Restart streaming after capture is done
+            rc = mm_camera_lib_start_stream(lib_handle);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_camera_lib_start_stream() err=%d\n",
+                         __func__, rc);
+                goto EXIT;
+            }
+        }
+    }
+
+EXIT:
+
+    return rc;
+}
+
+int mm_app_start_regression_test(int run_tc)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_app_t my_cam_app;
+
+    CDBG("\nCamera Test Application\n");
+    memset(&my_cam_app, 0, sizeof(mm_camera_app_t));
+
+    rc = mm_app_load_hal(&my_cam_app);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: mm_app_load_hal failed !!", __func__);
+        return rc;
+    }
+
+    if(run_tc) {
+        rc = mm_app_unit_test_entry(&my_cam_app);
+        return rc;
+    }
+#if 0
+    if(run_dual_tc) {
+        printf("\tRunning Dual camera test engine only\n");
+        rc = mm_app_dual_test_entry(&my_cam_app);
+        printf("\t Dual camera engine. EXIT(%d)!!!\n", rc);
+        exit(rc);
+    }
+#endif
+    return rc;
+}
+
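+/* dlopen() libmmcamera_tuning.so and resolve open_tuning_lib() to obtain
+ * the tuning function table. */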
+int32_t mm_camera_load_tuninglibrary(mm_camera_tuning_lib_params_t *tuning_param)
+{
+  void *(*tuning_open_lib)(void) = NULL;
+
+  CDBG("%s  %d\n", __func__, __LINE__);
+  tuning_param->lib_handle = dlopen("libmmcamera_tuning.so", RTLD_NOW);
+  if (!tuning_param->lib_handle) {
+    CDBG_ERROR("%s Failed opening libmmcamera_tuning.so\n", __func__);
+    return -EINVAL;
+  }
+
+  *(void **)&tuning_open_lib  = dlsym(tuning_param->lib_handle,
+    "open_tuning_lib");
+  if (!tuning_open_lib) {
+    CDBG_ERROR("%s Failed symbol libmmcamera_tuning.so\n", __func__);
+    return -EINVAL;
+  }
+
+  if (tuning_param->func_tbl) {
+    CDBG_ERROR("%s already loaded tuninglib..", __func__);
+    return 0;
+  }
+
+  tuning_param->func_tbl = (mm_camera_tune_func_t *)tuning_open_lib();
+  if (!tuning_param->func_tbl) {
+    CDBG_ERROR("%s Failed opening library func table ptr\n", __func__);
+    return -EINVAL;
+  }
+
+  CDBG("%s  %d\n", __func__, __LINE__);
+  return 0;
+}
+
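+/* Load the camera HAL, set default preview/snapshot dimensions and the
+ * default auto-focus mode, then open the requested camera. */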
+int mm_camera_lib_open(mm_camera_lib_handle *handle, int cam_id)
+{
+    int rc = MM_CAMERA_OK;
+
+    if ( NULL == handle ) {
+        CDBG_ERROR(" %s : Invalid handle", __func__);
+        rc = MM_CAMERA_E_INVALID_INPUT;
+        goto EXIT;
+    }
+
+    memset(handle, 0, sizeof(mm_camera_lib_handle));
+    rc = mm_app_load_hal(&handle->app_ctx);
+    if( MM_CAMERA_OK != rc ) {
+        CDBG_ERROR("%s:mm_app_init err\n", __func__);
+        goto EXIT;
+    }
+
+    handle->test_obj.buffer_width = DEFAULT_PREVIEW_WIDTH;
+    handle->test_obj.buffer_height = DEFAULT_PREVIEW_HEIGHT;
+    handle->test_obj.buffer_format = DEFAULT_SNAPSHOT_FORMAT;
+    handle->current_params.stream_width = DEFAULT_SNAPSHOT_WIDTH;
+    handle->current_params.stream_height = DEFAULT_SNAPSHOT_HEIGHT;
+    handle->current_params.af_mode = CAM_FOCUS_MODE_AUTO; // Default to auto focus mode
+    rc = mm_app_open(&handle->app_ctx, (uint8_t)cam_id, &handle->test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s:mm_app_open() cam_idx=%d, err=%d\n",
+                   __func__, cam_id, rc);
+        goto EXIT;
+    }
+
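+    /* Framebuffer initialization is disabled; rc is forced to
+     * MM_CAMERA_OK, so the error check below never triggers. */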
+    //rc = mm_app_initialize_fb(&handle->test_obj);
+    rc = MM_CAMERA_OK;
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: mm_app_initialize_fb() cam_idx=%d, err=%d\n",
+                   __func__, cam_id, rc);
+        goto EXIT;
+    }
+
+EXIT:
+
+    return rc;
+}
+
+int mm_camera_lib_start_stream(mm_camera_lib_handle *handle)
+{
+    int rc = MM_CAMERA_OK;
+    cam_capability_t camera_cap;
+
+    if ( NULL == handle ) {
+        CDBG_ERROR(" %s : Invalid handle", __func__);
+        rc = MM_CAMERA_E_INVALID_INPUT;
+        goto EXIT;
+    }
+
+    if ( handle->test_obj.zsl_enabled ) {
+        rc = mm_app_start_preview_zsl(&handle->test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s: mm_app_start_preview_zsl() err=%d\n",
+                       __func__, rc);
+            goto EXIT;
+        }
+    } else {
+        handle->test_obj.enable_reproc = ENABLE_REPROCESSING;
+        rc = mm_app_start_preview(&handle->test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s: mm_app_start_preview() err=%d\n",
+                       __func__, rc);
+            goto EXIT;
+        }
+    }
+
+    // Configure focus mode after stream starts
+    rc = mm_camera_lib_get_caps(handle, &camera_cap);
+    if ( MM_CAMERA_OK != rc ) {
+      CDBG_ERROR("%s:mm_camera_lib_get_caps() err=%d\n", __func__, rc);
+      return -1;
+    }
+    if (camera_cap.supported_focus_modes_cnt == 1 &&
+      camera_cap.supported_focus_modes[0] == CAM_FOCUS_MODE_FIXED) {
+      CDBG("focus not supported");
+      handle->test_obj.focus_supported = 0;
+      handle->current_params.af_mode = CAM_FOCUS_MODE_FIXED;
+    } else {
+      handle->test_obj.focus_supported = 1;
+    }
+    rc = setFocusMode(&handle->test_obj, handle->current_params.af_mode);
+    if (rc != MM_CAMERA_OK) {
+      CDBG_ERROR("%s:autofocus error\n", __func__);
+      goto EXIT;
+    }
+    handle->stream_running = 1;
+
+EXIT:
+    return rc;
+}
+
+int mm_camera_lib_stop_stream(mm_camera_lib_handle *handle)
+{
+    int rc = MM_CAMERA_OK;
+
+    if ( NULL == handle ) {
+        CDBG_ERROR(" %s : Invalid handle", __func__);
+        rc = MM_CAMERA_E_INVALID_INPUT;
+        goto EXIT;
+    }
+
+    if ( handle->test_obj.zsl_enabled ) {
+        rc = mm_app_stop_preview_zsl(&handle->test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s: mm_app_stop_preview_zsl() err=%d\n",
+                       __func__, rc);
+            goto EXIT;
+        }
+    } else {
+        rc = mm_app_stop_preview(&handle->test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s: mm_app_stop_preview() err=%d\n",
+                       __func__, rc);
+            goto EXIT;
+        }
+    }
+
+    handle->stream_running = 0;
+
+EXIT:
+    return rc;
+}
+
+int mm_camera_lib_get_caps(mm_camera_lib_handle *handle,
+                           cam_capability_t *caps)
+{
+    int rc = MM_CAMERA_OK;
+
+    if ( NULL == handle ) {
+        CDBG_ERROR(" %s : Invalid handle", __func__);
+        rc = MM_CAMERA_E_INVALID_INPUT;
+        goto EXIT;
+    }
+
+    if ( NULL == caps ) {
+        CDBG_ERROR(" %s : Invalid capabilities structure", __func__);
+        rc = MM_CAMERA_E_INVALID_INPUT;
+        goto EXIT;
+    }
+
+    *caps = *( (cam_capability_t *) handle->test_obj.cap_buf.mem_info.data );
+
+EXIT:
+
+    return rc;
+}
+
+
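+/* Dispatch a single tuning/test command. Most cases wrap one of the
+ * parameter helpers above; RAW capture and non-ZSL JPEG capture stop
+ * and restart streaming around the capture. */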
+int mm_camera_lib_send_command(mm_camera_lib_handle *handle,
+                               mm_camera_lib_commands cmd,
+                               void *in_data, void *out_data)
+{
+    uint32_t width, height;
+    int rc = MM_CAMERA_OK;
+    cam_capability_t *camera_cap = NULL;
+    mm_camera_lib_snapshot_params *dim = NULL;
+
+    if ( NULL == handle ) {
+        CDBG_ERROR(" %s : Invalid handle", __func__);
+        rc = MM_CAMERA_E_INVALID_INPUT;
+        goto EXIT;
+    }
+
+    camera_cap = (cam_capability_t *) handle->test_obj.cap_buf.mem_info.data;
+
+    switch(cmd) {
+        case MM_CAMERA_LIB_FPS_RANGE:
+            if ( NULL != in_data ) {
+                cam_fps_range_t range = *(( cam_fps_range_t * )in_data);
+                rc = setFPSRange(&handle->test_obj, range);
+                if (rc != MM_CAMERA_OK) {
+                        CDBG_ERROR("%s: setFPSRange() err=%d\n",
+                                   __func__, rc);
+                        goto EXIT;
+                }
+            }
+            break;
+        case MM_CAMERA_LIB_FLASH:
+            if ( NULL != in_data ) {
+                cam_flash_mode_t flash = *(( int * )in_data);
+                rc = setFlash(&handle->test_obj, flash);
+                if (rc != MM_CAMERA_OK) {
+                        CDBG_ERROR("%s: setFlash() err=%d\n",
+                                   __func__, rc);
+                        goto EXIT;
+                }
+            }
+            break;
+        case MM_CAMERA_LIB_BESTSHOT:
+            if ( NULL != in_data ) {
+                cam_scene_mode_type scene = *(( int * )in_data);
+                rc = setScene(&handle->test_obj, scene);
+                if (rc != MM_CAMERA_OK) {
+                        CDBG_ERROR("%s: setScene() err=%d\n",
+                                   __func__, rc);
+                        goto EXIT;
+                }
+            }
+            break;
+        case MM_CAMERA_LIB_ZOOM:
+            if ( NULL != in_data ) {
+                int zoom = *(( int * )in_data);
+                rc = setZoom(&handle->test_obj, zoom);
+                if (rc != MM_CAMERA_OK) {
+                        CDBG_ERROR("%s: setZoom() err=%d\n",
+                                   __func__, rc);
+                        goto EXIT;
+                }
+            }
+            break;
+        case MM_CAMERA_LIB_ISO:
+            if ( NULL != in_data ) {
+                cam_iso_mode_type iso = *(( int * )in_data);
+                rc = setISO(&handle->test_obj, iso);
+                if (rc != MM_CAMERA_OK) {
+                        CDBG_ERROR("%s: setISO() err=%d\n",
+                                   __func__, rc);
+                        goto EXIT;
+                }
+            }
+            break;
+        case MM_CAMERA_LIB_SHARPNESS:
+            if ( NULL != in_data ) {
+                int sharpness = *(( int * )in_data);
+                rc = setSharpness(&handle->test_obj, sharpness);
+                if (rc != MM_CAMERA_OK) {
+                        CDBG_ERROR("%s: setSharpness() err=%d\n",
+                                   __func__, rc);
+                        goto EXIT;
+                }
+            }
+            break;
+        case MM_CAMERA_LIB_SATURATION:
+            if ( NULL != in_data ) {
+                int saturation = *(( int * )in_data);
+                rc = setSaturation(&handle->test_obj, saturation);
+                if (rc != MM_CAMERA_OK) {
+                        CDBG_ERROR("%s: setSaturation() err=%d\n",
+                                   __func__, rc);
+                        goto EXIT;
+                }
+            }
+            break;
+        case MM_CAMERA_LIB_CONTRAST:
+            if ( NULL != in_data ) {
+                int contrast = *(( int * )in_data);
+                rc = setContrast(&handle->test_obj, contrast);
+                if (rc != MM_CAMERA_OK) {
+                        CDBG_ERROR("%s: setContrast() err=%d\n",
+                                   __func__, rc);
+                        goto EXIT;
+                }
+            }
+            break;
+        case MM_CAMERA_LIB_SET_TINTLESS:
+            if ( NULL != in_data ) {
+                int tintless = *(( int * )in_data);
+                rc = setTintless(&handle->test_obj, tintless);
+                if (rc != MM_CAMERA_OK) {
+                        CDBG_ERROR("%s: enable/disable:%d tintless() err=%d\n",
+                                   __func__, tintless, rc);
+                        goto EXIT;
+                }
+            }
+            break;
+        case MM_CAMERA_LIB_BRIGHTNESS:
+            if ( NULL != in_data ) {
+                int brightness = *(( int * )in_data);
+                rc = setBrightness(&handle->test_obj, brightness);
+                if (rc != MM_CAMERA_OK) {
+                        CDBG_ERROR("%s: setBrightness() err=%d\n",
+                                   __func__, rc);
+                        goto EXIT;
+                }
+            }
+            break;
+        case MM_CAMERA_LIB_EXPOSURE_METERING:
+            if ( NULL != in_data ) {
+                cam_auto_exposure_mode_type exp = *(( int * )in_data);
+                rc = setExposureMetering(&handle->test_obj, exp);
+                if (rc != MM_CAMERA_OK) {
+                        CDBG_ERROR("%s: setExposureMetering() err=%d\n",
+                                   __func__, rc);
+                        goto EXIT;
+                }
+            }
+            break;
+        case MM_CAMERA_LIB_WB:
+            if ( NULL != in_data ) {
+                cam_wb_mode_type wb = *(( int * )in_data);
+                rc = setWhiteBalance(&handle->test_obj, wb);
+                if (rc != MM_CAMERA_OK) {
+                        CDBG_ERROR("%s: setWhiteBalance() err=%d\n",
+                                   __func__, rc);
+                        goto EXIT;
+                }
+            }
+            break;
+        case MM_CAMERA_LIB_ANTIBANDING:
+            if ( NULL != in_data ) {
+                int antibanding = *(( int * )in_data);
+                rc = setAntibanding(&handle->test_obj, antibanding);
+                if (rc != MM_CAMERA_OK) {
+                        CDBG_ERROR("%s: setAntibanding() err=%d\n",
+                                   __func__, rc);
+                        goto EXIT;
+                }
+            }
+            break;
+        case MM_CAMERA_LIB_EV:
+            if ( NULL != in_data ) {
+                int ev = *(( int * )in_data);
+                rc = setEVCompensation(&handle->test_obj, ev);
+                if (rc != MM_CAMERA_OK) {
+                        CDBG_ERROR("%s: setEVCompensation() err=%d\n",
+                                   __func__, rc);
+                        goto EXIT;
+                }
+            }
+            break;
+        case MM_CAMERA_LIB_ZSL_ENABLE:
+            if ( NULL != in_data) {
+                int enable_zsl = *(( int * )in_data);
+                if ( ( enable_zsl != handle->test_obj.zsl_enabled ) &&
+                        handle->stream_running ) {
+                    rc = mm_camera_lib_stop_stream(handle);
+                    if (rc != MM_CAMERA_OK) {
+                        CDBG_ERROR("%s: mm_camera_lib_stop_stream() err=%d\n",
+                                   __func__, rc);
+                        goto EXIT;
+                    }
+                    handle->test_obj.zsl_enabled = enable_zsl;
+                    rc = mm_camera_lib_start_stream(handle);
+                    if (rc != MM_CAMERA_OK) {
+                        CDBG_ERROR("%s: mm_camera_lib_start_stream() err=%d\n",
+                                   __func__, rc);
+                        goto EXIT;
+                    }
+                } else {
+                    handle->test_obj.zsl_enabled = enable_zsl;
+                }
+            }
+            break;
+        case MM_CAMERA_LIB_RAW_CAPTURE:
+
+            if ( 0 == handle->stream_running ) {
+                CDBG_ERROR(" %s : Streaming is not enabled!", __func__);
+                rc = MM_CAMERA_E_INVALID_OPERATION;
+                goto EXIT;
+            }
+
+            rc = mm_camera_lib_stop_stream(handle);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_camera_lib_stop_stream() err=%d\n",
+                           __func__, rc);
+                goto EXIT;
+            }
+
+            width = handle->test_obj.buffer_width;
+            height = handle->test_obj.buffer_height;
+            handle->test_obj.buffer_width =
+                    (uint32_t)camera_cap->raw_dim[0].width;
+            handle->test_obj.buffer_height =
+                    (uint32_t)camera_cap->raw_dim[0].height;
+            handle->test_obj.buffer_format = DEFAULT_RAW_FORMAT;
+            CDBG_ERROR("%s: MM_CAMERA_LIB_RAW_CAPTURE %dx%d\n",
+                       __func__,
+                       camera_cap->raw_dim[0].width,
+                       camera_cap->raw_dim[0].height);
+            rc = mm_app_start_capture_raw(&handle->test_obj, 1);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_app_start_capture() err=%d\n",
+                           __func__, rc);
+                goto EXIT;
+            }
+
+            mm_camera_app_wait();
+
+            rc = mm_app_stop_capture_raw(&handle->test_obj);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_app_stop_capture() err=%d\n",
+                           __func__, rc);
+                goto EXIT;
+            }
+
+            handle->test_obj.buffer_width = width;
+            handle->test_obj.buffer_height = height;
+            handle->test_obj.buffer_format = DEFAULT_SNAPSHOT_FORMAT;
+            rc = mm_camera_lib_start_stream(handle);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_camera_lib_start_stream() err=%d\n",
+                           __func__, rc);
+                goto EXIT;
+            }
+
+            break;
+
+        case MM_CAMERA_LIB_JPEG_CAPTURE:
+            if ( 0 == handle->stream_running ) {
+                CDBG_ERROR(" %s : Streaming is not enabled!", __func__);
+                rc = MM_CAMERA_E_INVALID_OPERATION;
+                goto EXIT;
+            }
+
+            if ( NULL != in_data ) {
+                dim = ( mm_camera_lib_snapshot_params * ) in_data;
+            }
+
+            rc = tuneserver_capture(handle, dim);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:capture error %d\n", __func__, rc);
+                goto EXIT;
+            }
+
+            if (handle->test_obj.is_chromatix_reload == TRUE) {
+              /* Reload Chromatix so that tuned data is not lost
+               * when a snapshot is captured */
+              rc = setReloadChromatix(&handle->test_obj,
+                (tune_chromatix_t *)&(handle->test_obj.tune_data));
+              if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: setReloadChromatix failed\n", __func__);
+                goto EXIT;
+              }
+            }
+            break;
+
+        case MM_CAMERA_LIB_SET_FOCUS_MODE: {
+            cam_focus_mode_type mode = *((cam_focus_mode_type *)in_data);
+            handle->current_params.af_mode = mode;
+            rc = setFocusMode(&handle->test_obj, mode);
+            if (rc != MM_CAMERA_OK) {
+              CDBG_ERROR("%s:autofocus error\n", __func__);
+              goto EXIT;
+            }
+            break;
+        }
+
+        case MM_CAMERA_LIB_DO_AF:
+            if (handle->test_obj.focus_supported) {
+              rc = handle->test_obj.cam->ops->do_auto_focus(handle->test_obj.cam->camera_handle);
+              if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:autofocus error\n", __func__);
+                goto EXIT;
+              }
+              /*Waiting for Auto Focus Done Call Back*/
+              mm_camera_app_wait();
+            }
+            break;
+
+        case MM_CAMERA_LIB_CANCEL_AF:
+            rc = handle->test_obj.cam->ops->cancel_auto_focus(handle->test_obj.cam->camera_handle);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:autofocus error\n", __func__);
+                goto EXIT;
+            }
+
+            break;
+
+        case MM_CAMERA_LIB_LOCK_AWB:
+            rc = setAwbLock(&handle->test_obj, 1);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: AWB locking failed\n", __func__);
+                goto EXIT;
+            }
+            break;
+
+        case MM_CAMERA_LIB_UNLOCK_AWB:
+            rc = setAwbLock(&handle->test_obj, 0);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: AE unlocking failed\n", __func__);
+                goto EXIT;
+            }
+            break;
+
+        case MM_CAMERA_LIB_LOCK_AE:
+            rc = setAecLock(&handle->test_obj, 1);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: AE locking failed\n", __func__);
+                goto EXIT;
+            }
+            break;
+
+        case MM_CAMERA_LIB_UNLOCK_AE:
+            rc = setAecLock(&handle->test_obj, 0);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: AE unlocking failed\n", __func__);
+                goto EXIT;
+            }
+            break;
+
+       case MM_CAMERA_LIB_SET_3A_COMMAND: {
+          rc = set3Acommand(&handle->test_obj, (cam_eztune_cmd_data_t *)in_data);
+          if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:3A set command error\n", __func__);
+            goto EXIT;
+          }
+          break;
+        }
+
+       case MM_CAMERA_LIB_GET_CHROMATIX: {
+           rc = getChromatix(&handle->test_obj,
+                (tune_chromatix_t *)out_data);
+           if (rc != MM_CAMERA_OK) {
+             CDBG_ERROR("%s: getChromatix failed\n", __func__);
+             goto EXIT;
+           }
+           break;
+       }
+
+       case MM_CAMERA_LIB_SET_RELOAD_CHROMATIX: {
+           rc = setReloadChromatix(&handle->test_obj,
+             (tune_chromatix_t *)in_data);
+           if (rc != MM_CAMERA_OK) {
+             CDBG_ERROR("%s: setReloadChromatix failed\n", __func__);
+             goto EXIT;
+           }
+           handle->test_obj.is_chromatix_reload = TRUE;
+           memcpy((void *)&(handle->test_obj.tune_data),
+             (void *)in_data, sizeof(tune_chromatix_t));
+           break;
+       }
+
+       case MM_CAMERA_LIB_GET_AFTUNE: {
+           rc = getAutofocusParams(&handle->test_obj,
+                (tune_autofocus_t *)out_data);
+           if (rc != MM_CAMERA_OK) {
+             CDBG_ERROR("%s: getAutofocusParams failed\n", __func__);
+             goto EXIT;
+           }
+           break;
+       }
+
+       case MM_CAMERA_LIB_SET_RELOAD_AFTUNE: {
+           rc = setReloadAutofocusParams(&handle->test_obj,
+             (tune_autofocus_t *)in_data);
+           if (rc != MM_CAMERA_OK) {
+             CDBG_ERROR("%s: setReloadAutofocusParams failed\n", __func__);
+             goto EXIT;
+           }
+           break;
+       }
+
+       case MM_CAMERA_LIB_SET_AUTOFOCUS_TUNING: {
+           rc = setAutoFocusTuning(&handle->test_obj, in_data);
+           if (rc != MM_CAMERA_OK) {
+             CDBG_ERROR("%s: Set AF tuning failed\n", __func__);
+             goto EXIT;
+           }
+           break;
+       }
+
+       case MM_CAMERA_LIB_SET_VFE_COMMAND: {
+           rc = setVfeCommand(&handle->test_obj, in_data);
+           if (rc != MM_CAMERA_OK) {
+             CDBG_ERROR("%s: Set vfe command failed\n", __func__);
+             goto EXIT;
+           }
+           break;
+       }
+
+       case MM_CAMERA_LIB_SET_POSTPROC_COMMAND: {
+           rc = setPPCommand(&handle->test_obj, in_data);
+           if (rc != MM_CAMERA_OK) {
+             CDBG_ERROR("%s: Set pp command failed\n", __func__);
+             goto EXIT;
+           }
+           break;
+       }
+
+        case MM_CAMERA_LIB_WNR_ENABLE: {
+            rc = setWNR(&handle->test_obj, *((uint8_t *)in_data));
+            if ( rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: Set wnr enable failed\n", __func__);
+                goto EXIT;
+            }
+            break;
+        }
+
+      case MM_CAMERA_LIB_NO_ACTION:
+        default:
+            break;
+    };
+
+EXIT:
+
+    return rc;
+}
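+
+/** mm_camera_lib_number_of_cameras
+ *    @handle: the test library handle
+ *
+ *  queries the number of cameras enumerated by the backend
+ *
+ *  Return: number of cameras, 0 if the handle is invalid.
+ **/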
+int mm_camera_lib_number_of_cameras(mm_camera_lib_handle *handle)
+{
+    int rc = 0;
+
+    if ( NULL == handle ) {
+        CDBG_ERROR(" %s : Invalid handle", __func__);
+        goto EXIT;
+    }
+
+    rc = handle->app_ctx.num_cameras;
+
+EXIT:
+
+    return rc;
+}
+
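+/** mm_camera_lib_close
+ *    @handle: the test library handle
+ *
+ *  closes the camera test object
+ *
+ *  Return: MM_CAMERA_OK on success, error code otherwise.
+ **/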
+int mm_camera_lib_close(mm_camera_lib_handle *handle)
+{
+    int rc = MM_CAMERA_OK;
+
+    if ( NULL == handle ) {
+        CDBG_ERROR(" %s : Invalid handle", __func__);
+        rc = MM_CAMERA_E_INVALID_INPUT;
+        goto EXIT;
+    }
+
+    //rc = mm_app_close_fb(&handle->test_obj);
+    rc = MM_CAMERA_OK;
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s:mm_app_close_fb() err=%d\n",
+                   __func__, rc);
+        goto EXIT;
+    }
+
+    rc = mm_app_close(&handle->test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s:mm_app_close() err=%d\n",
+                   __func__, rc);
+        goto EXIT;
+    }
+
+EXIT:
+    return rc;
+}
+
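+/** mm_camera_lib_set_preview_usercb
+ *    @handle: the test library handle
+ *    @cb: user callback invoked for each preview frame
+ *
+ *  registers a user preview callback; only one callback
+ *  can be registered at a time
+ *
+ *  Return: 0 on success, -1 if a callback is already set.
+ **/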
+int mm_camera_lib_set_preview_usercb(
+   mm_camera_lib_handle *handle, prev_callback cb)
+{
+    if (handle->test_obj.user_preview_cb != NULL) {
+        CDBG_ERROR("%s, already set preview callbacks\n", __func__);
+        return -1;
+    }
+    handle->test_obj.user_preview_cb = *cb;
+    return 0;
+}
+
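+/** mm_app_set_preview_fps_range
+ *    @test_obj: the camera test object
+ *    @fpsRange: requested preview fps range
+ *
+ *  applies the requested preview fps range through setFPSRange()
+ *
+ *  Return: MM_CAMERA_OK on success, error code otherwise.
+ **/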
+int mm_app_set_preview_fps_range(mm_camera_test_obj_t *test_obj,
+                        cam_fps_range_t *fpsRange)
+{
+    int rc = MM_CAMERA_OK;
+    CDBG_HIGH("%s: preview fps range: min=%f, max=%f.", __func__,
+        fpsRange->min_fps, fpsRange->max_fps);
+    rc = setFPSRange(test_obj, *fpsRange);
+
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: add_parm_entry_tobatch failed !!", __func__);
+        return rc;
+    }
+
+    return rc;
+}
diff --git a/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_commands.c b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_commands.c
new file mode 100644
index 0000000..890a8cb
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_commands.c
@@ -0,0 +1,291 @@
+/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include <stdlib.h>
+#include <cutils/properties.h>
+#include <fcntl.h>
+#include <dlfcn.h>
+#include <mm_qcamera_app.h>
+#include "mm_qcamera_commands.h"
+#include "mm_qcamera_dbg.h"
+
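+/** tuneserver_initialize_prevtuningp
+ *    @ctrl: the camera library handle
+ *    @pr_client_socket_id: socket id of the preview tuning client
+ *    @dimension: preview frame dimension
+ *    @send_buf: buffer for the response payload
+ *    @send_len: length of the response payload
+ *
+ *  initializes the preview tuning protocol and registers the tuning
+ *  library's preview frame callback with the camera test library
+ *
+ *  Return: >=0 on success, -1 on failure.
+ **/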
+int tuneserver_initialize_prevtuningp(void * ctrl,
+  int pr_client_socket_id, cam_dimension_t dimension,
+  char **send_buf, uint32_t *send_len)
+{
+  int result = 0;
+  mm_camera_lib_handle *lib_handle = (mm_camera_lib_handle *) ctrl;
+  tuningserver_t *tctrl = &lib_handle->tsctrl;
+
+  CDBG("%s  %d\n", __func__, __LINE__);
+  if (tctrl->tuning_params.func_tbl->prevcommand_process == NULL) {
+      ALOGE("%s  %d\n", __func__, __LINE__);
+      return -1;
+  }
+
+  result = tctrl->tuning_params.func_tbl->prevcommand_process(
+      NULL, TUNE_PREVCMD_INIT, (void *)&pr_client_socket_id,
+      send_buf, send_len);
+  result = tctrl->tuning_params.func_tbl->prevcommand_process(
+      NULL, TUNE_PREVCMD_SETDIM, (void *)&dimension,
+      send_buf, send_len);
+
+  mm_camera_lib_set_preview_usercb(lib_handle,
+      (tctrl->tuning_params.func_tbl->prevframe_callback));
+
+  return result;
+}
+
+int tuneserver_deinitialize_prevtuningp(void * ctrl,
+    char **send_buf, uint32_t *send_len)
+{
+  int result = 0;
+  tuningserver_t *tctrl = (tuningserver_t *) ctrl;
+
+  CDBG("%s  %d\n", __func__, __LINE__);
+
+  result = tctrl->tuning_params.func_tbl->prevcommand_process(
+    &tctrl->pr_proto, TUNE_PREVCMD_DEINIT, NULL, send_buf, send_len);
+
+  return result;
+}
+
+int tuneserver_preview_getinfo(void * ctrl, char **send_buf, uint32_t *send_len)
+{
+  int result = 0;
+  tuningserver_t *tctrl = (tuningserver_t *) ctrl;
+
+  CDBG("%s  %d\n", __func__, __LINE__);
+  result = tctrl->tuning_params.func_tbl->prevcommand_process(
+    &tctrl->pr_proto, TUNE_PREVCMD_GETINFO, NULL, send_buf, send_len);
+
+  return result;
+}
+
+int tuneserver_preview_getchunksize(void * ctrl,
+  char **send_buf, uint32_t *send_len)
+{
+  int result = 0;
+  tuningserver_t *tctrl = (tuningserver_t *) ctrl;
+
+  CDBG("%s  %d\n", __func__, __LINE__);
+  result = tctrl->tuning_params.func_tbl->prevcommand_process(
+    &tctrl->pr_proto, TUNE_PREVCMD_GETCHUNKSIZE,
+    (void *)&tctrl->pr_proto->new_cnk_size, send_buf, send_len);
+
+  return result;
+}
+
+int tuneserver_preview_getframe(void * ctrl,
+  char **send_buf, uint32_t *send_len)
+{
+  int result = 0;
+  tuningserver_t *tctrl = (tuningserver_t *) ctrl;
+
+  CDBG("%s  %d\n", __func__, __LINE__);
+  result = tctrl->tuning_params.func_tbl->prevcommand_process(
+    &tctrl->pr_proto, TUNE_PREVCMD_GETFRAME, NULL, send_buf, send_len);
+
+  return result;
+}
+
+int tuneserver_preview_unsupported(void * ctrl,
+  char **send_buf, uint32_t *send_len)
+{
+  int result = 0;
+  tuningserver_t *tctrl = (tuningserver_t *) ctrl;
+
+  CDBG("%s  %d\n", __func__, __LINE__);
+  result = tctrl->tuning_params.func_tbl->prevcommand_process(
+    &tctrl->pr_proto, TUNE_PREVCMD_UNSUPPORTED, NULL, send_buf, send_len);
+
+  return result;
+}
+
+int tuneserver_initialize_tuningp(void * ctrl, int client_socket_id,
+  char *send_buf, uint32_t send_len)
+{
+  int result = 0;
+  mm_camera_lib_handle *lib_handle = (mm_camera_lib_handle *) ctrl;
+  tuningserver_t *tctrl = &lib_handle->tsctrl;
+
+  CDBG("%s  %d\n", __func__, __LINE__);
+  result = tctrl->tuning_params.func_tbl->command_process(
+    lib_handle, TUNE_CMD_INIT, &client_socket_id, send_buf, send_len);
+
+  return result;
+}
+
+int tuneserver_deinitialize_tuningp(void * ctrl, int client_socket_id,
+  char *send_buf, uint32_t send_len)
+{
+  int result = 0;
+  tuningserver_t *tctrl = (tuningserver_t *) ctrl;
+
+  CDBG("%s  %d\n", __func__, __LINE__);
+
+  result = tctrl->tuning_params.func_tbl->command_process(
+    NULL, TUNE_CMD_DEINIT, &client_socket_id, send_buf, send_len);
+
+  return result;
+}
+
+int tuneserver_process_get_list_cmd(void * ctrl, void *recv_cmd,
+  char *send_buf, uint32_t send_len)
+{
+  int result = 0;
+  tuningserver_t *tctrl = (tuningserver_t *) ctrl;
+
+  CDBG("%s  %d\n", __func__, __LINE__);
+  result = tctrl->tuning_params.func_tbl->command_process(
+     recv_cmd, TUNE_CMD_GET_LIST, NULL, send_buf, send_len);
+
+  return result;
+}
+
+int tuneserver_process_get_params_cmd(void * ctrl, void *recv_cmd,
+  char *send_buf, uint32_t send_len)
+{
+  int result = 0;
+  tuningserver_t *tctrl = (tuningserver_t *) ctrl;
+
+  CDBG("%s  %d\n", __func__, __LINE__);
+  result = tctrl->tuning_params.func_tbl->command_process
+    (recv_cmd, TUNE_CMD_GET_PARAMS, NULL, send_buf, send_len);
+
+  return result;
+}
+
+int tuneserver_process_set_params_cmd(void * ctrl, void *recv_cmd,
+  char *send_buf, uint32_t send_len)
+{
+  int result = 0;
+  tuningserver_t *tctrl = (tuningserver_t *) ctrl;
+
+  CDBG("%s  %d\n", __func__, __LINE__);
+  result = tctrl->tuning_params.func_tbl->command_process(
+     recv_cmd, TUNE_CMD_SET_PARAMS, NULL, send_buf, send_len);
+
+  return result;
+}
+
+int tuneserver_process_misc_cmd(void * ctrl, void *recv_cmd,
+  char *send_buf, uint32_t send_len)
+{
+  int result = 0;
+  tuningserver_t *tctrl = (tuningserver_t *) ctrl;
+
+  CDBG("%s  %d\n", __func__, __LINE__);
+  result = tctrl->tuning_params.func_tbl->command_process(
+     recv_cmd, TUNE_CMD_MISC, NULL, send_buf, send_len);
+
+  return result;
+}
+
+/** tuneserver_close_cam
+ *    @lib_handle: the camera handle object
+ *
+ *  closes the camera
+ *
+ *  Return: >=0 on success, -1 on failure.
+ **/
+int tuneserver_close_cam(mm_camera_lib_handle *lib_handle)
+{
+  int result = 0;
+
+  result = mm_camera_lib_close(lib_handle);
+  if (result < 0) {
+    printf("%s: Camera close failed\n", __func__);
+  } else {
+    printf("Camera is closed \n");
+  }
+  return result;
+}
+#if 0
+/** tuneserver_start_cam
+ *    @lib_handle: the camera handle object
+ *
+ *  starts the camera
+ *
+ *  Return: >=0 on success, -1 on failure.
+ **/
+static int tuneserver_start_cam(mm_camera_lib_handle *lib_handle)
+{
+  int result = 0;
+
+  result = mm_camera_lib_start_stream(lib_handle);
+  if (result < 0) {
+    printf("%s: Camera start failed\n", __func__);
+    goto error1;
+  }
+  return result;
+error1:
+  mm_camera_lib_close(lib_handle);
+  return result;
+}
+#endif
+
+/** tuneserver_stop_cam
+ *    @lib_handle: the camera handle object
+ *
+ *  stops the camera
+ *
+ *  Return: >=0 on success, -1 on failure.
+ **/
+int tuneserver_stop_cam(mm_camera_lib_handle *lib_handle)
+{
+  int result = 0;
+
+  result = mm_camera_lib_stop_stream(lib_handle);
+  if (result < 0) {
+    printf("%s: Camera stop failed\n", __func__);
+  }
+//  result = mm_camera_lib_close(lib_handle);
+  return result;
+}
+
+/** tuneserver_open_cam
+ *    @lib_handle: the camera handle object
+ *
+ *  opens the camera
+ *
+ *  Return: >=0 on success, -1 on failure.
+ **/
+#if 1
+int tuneserver_open_cam(mm_camera_lib_handle *lib_handle)
+{
+  int result = 0;
+
+  CDBG("%s  %d\n", __func__, __LINE__);
+  result = mm_camera_load_tuninglibrary(&lib_handle->tsctrl.tuning_params);
+  if (result < 0) {
+    CDBG_ERROR("%s: tuning library open failed\n", __func__);
+  }
+  return result;
+}
+#endif
diff --git a/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_dual_test.c b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_dual_test.c
new file mode 100755
index 0000000..6a3515c
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_dual_test.c
@@ -0,0 +1,1936 @@
+/*
+Copyright (c) 2012, The Linux Foundation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+    * Neither the name of The Linux Foundation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include <pthread.h>
+#include "mm_camera_dbg.h"
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <poll.h>
+#include "mm_qcamera_unit_test.h"
+
+#define MM_QCAMERA_APP_UTEST_MAX_MAIN_LOOP 4
+#define MM_QCAM_APP_TEST_NUM 128
+
+#define MM_QCAMERA_APP_WAIT_TIME 1000000000
+
+extern int system_dimension_set(int cam_id);
+extern int stopPreview(int cam_id);
+extern int takePicture_yuv(int cam_id);
+extern int takePicture_rdi(int cam_id);
+extern int startRdi(int cam_id);
+extern int stopRdi(int cam_id);
+extern int startStats(int cam_id);
+extern int stopStats(int cam_id);
+
+
+/*
+* 1. open back
+* 2. open front
+* 3. start back
+* 4. start front
+* 5. stop back
+* 6. stop front
+* 7. close back
+* 8. close front
+* 9. take picture
+* a. start recording
+* b. stop recording
+* c. take picture rdi
+*/
+static mm_app_tc_t mm_app_tc[MM_QCAM_APP_TEST_NUM];
+static int num_test_cases = 0;
+struct test_case_params {
+  uint16_t launch;
+  uint16_t preview;
+  uint16_t recording;
+  uint16_t snapshot;
+};
+
+/*  Test case 12436857 :*/
+
+int mm_app_dtc_0(mm_camera_app_t *cam_apps)
+{
+        int rc = MM_CAMERA_OK;
+        int i,j;
+        int result = 0;
+        int front_camera = 1;
+        int back_camera = 0;
+
+        printf("\n Verifying Preview on back Camera and RDI on Front camera 0...\n");
+        CDBG_ERROR("DUAL open back camera \n");
+        if(mm_app_open(back_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() back camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        if(system_dimension_set(back_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL open front camera \n");
+        if(mm_app_open(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() front camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        if(system_dimension_set(front_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL start camera Rdi for front \n");
+        if( MM_CAMERA_OK != (rc = startRdi(front_camera))) {
+                CDBG_ERROR("%s: startRdi() front camera err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        sleep(1);
+
+        CDBG_ERROR("DUAL start camera Preview for back \n");
+        if( MM_CAMERA_OK != (rc = startPreview(back_camera))) {
+                CDBG_ERROR("%s: startPreview() backcamera err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        sleep(1);
+
+        CDBG_ERROR("DUAL stop camera Rdi for front \n");
+        if( MM_CAMERA_OK != (rc = stopRdi(front_camera))) {
+                CDBG_ERROR("%s: stopRdi() front camera err=%d\n", __func__, rc);
+                goto end;
+        }
+        usleep(10*1000);
+
+        CDBG_ERROR("DUAL close front camera\n");
+        if( mm_app_close(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        sleep(1);
+        CDBG_ERROR("DUAL stop camera Preview for back \n");
+        if( MM_CAMERA_OK != (rc = stopPreview(back_camera))) {
+                CDBG("%s: stopPreview() err=%d\n", __func__, rc);
+                goto end;
+        }
+        usleep(10*1000);
+        CDBG_ERROR("DUAL close back camera \n");
+        if( mm_app_close(back_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        CDBG_ERROR("DUAL end \n");
+
+end:
+        if(rc == 0) {
+                printf("\nPassed\n");
+        }else{
+                printf("\nFailed\n");
+        }
+        CDBG("%s:END, rc = %d\n", __func__, rc);
+        return rc;
+}
+
+/*  Test case 12436587 :*/
+
+int mm_app_dtc_1(mm_camera_app_t *cam_apps)
+{
+        int rc = MM_CAMERA_OK;
+        int i,j;
+        int result = 0;
+        int front_camera = 1;
+        int back_camera = 0;
+
+        printf("\n Verifying Preview on back Camera and RDI on Front camera 1...\n");
+        CDBG_ERROR("DUAL open back camera \n");
+        if(mm_app_open(back_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() back camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        if(system_dimension_set(back_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL open front camera \n");
+        if(mm_app_open(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() front camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        if(system_dimension_set(front_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL start camera Rdi for front \n");
+        if( MM_CAMERA_OK != (rc = startRdi(front_camera))) {
+                CDBG_ERROR("%s: startRdi() front camera err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        sleep(1);
+
+        CDBG_ERROR("DUAL start camera Preview for back \n");
+
+        if( MM_CAMERA_OK != (rc = startPreview(back_camera))) {
+                CDBG_ERROR("%s: startPreview() backcamera err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        sleep(1);
+        CDBG_ERROR("DUAL end \n");
+
+        CDBG_ERROR("DUAL stop camera Preview for front \n");
+        if( MM_CAMERA_OK != (rc = stopRdi(front_camera))) {
+                CDBG_ERROR("%s: startPreview() backcamera err=%d\n", __func__, rc);
+                goto end;
+        }
+        usleep(10*1000);
+        CDBG_ERROR("DUAL stop camera Preview for back \n");
+        if( MM_CAMERA_OK != (rc = stopPreview(back_camera))) {
+                CDBG("%s: stopPreview() err=%d\n", __func__, rc);
+                goto end;
+        }
+        usleep(10*1000);
+        CDBG_ERROR("DUAL close front camera\n");
+        if( mm_app_close(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        CDBG_ERROR("DUAL close back camera \n");
+        if( mm_app_close(back_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+end:
+        if(rc == 0) {
+                printf("\nPassed\n");
+        }else{
+                printf("\nFailed\n");
+        }
+        CDBG("%s:END, rc = %d\n", __func__, rc);
+        return rc;
+}
+
+/*  Test case 12436578 :*/
+
+int mm_app_dtc_2(mm_camera_app_t *cam_apps)
+{
+        int rc = MM_CAMERA_OK;
+        int i,j;
+        int result = 0;
+        int front_camera = 1;
+        int back_camera = 0;
+
+        printf("\n Verifying Preview on back Camera and RDI on Front camera 2...\n");
+        CDBG_ERROR("DUAL open back camera \n");
+        if(mm_app_open(back_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() back camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        if(system_dimension_set(back_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL open front camera \n");
+        if(mm_app_open(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() front camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        if(system_dimension_set(front_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL start camera Rdi for front \n");
+        if( MM_CAMERA_OK != (rc = startRdi(front_camera))) {
+                CDBG_ERROR("%s: startRdi() front camera err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        sleep(1);
+
+        CDBG_ERROR("DUAL start camera Preview for back \n");
+
+        if( MM_CAMERA_OK != (rc = startPreview(back_camera))) {
+                CDBG_ERROR("%s: startPreview() backcamera err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        sleep(1);
+        CDBG_ERROR("DUAL end \n");
+
+        CDBG_ERROR("DUAL stop camera Preview for front \n");
+        if( MM_CAMERA_OK != (rc = stopRdi(front_camera))) {
+                CDBG_ERROR("%s: startPreview() backcamera err=%d\n", __func__, rc);
+                goto end;
+        }
+        usleep(10*1000);
+        CDBG_ERROR("DUAL stop camera Preview for back \n");
+        if( MM_CAMERA_OK != (rc = stopPreview(back_camera))) {
+                CDBG("%s: stopPreview() err=%d\n", __func__, rc);
+                goto end;
+        }
+        usleep(10*1000);
+        CDBG_ERROR("DUAL close back camera \n");
+        if( mm_app_close(back_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        CDBG_ERROR("DUAL close front camera\n");
+        if( mm_app_close(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+end:
+        if(rc == 0) {
+                printf("\nPassed\n");
+        }else{
+                printf("\nFailed\n");
+        }
+        CDBG("%s:END, rc = %d\n", __func__, rc);
+        return rc;
+}
+
+/*  Test case 241395768 : 1357 * 3, This is performed three times
+* And for each iteration 9 is performed thrice */
+
+int mm_app_dtc_3(mm_camera_app_t *cam_apps)
+{
+        int rc = MM_CAMERA_OK;
+        int i,j,k;
+        int result = 0;
+        int front_camera = 1;
+        int back_camera = 0;
+
+        printf("\n Verifying Preview and snapshot on back Camera and RDI on Front camera 3...\n");
+        CDBG_ERROR("DUAL open front camera \n");
+        if(mm_app_open(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() front camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        if(system_dimension_set(front_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL start camera Preview for front \n");
+
+        if( MM_CAMERA_OK != (rc = startRdi(front_camera))) {
+                CDBG_ERROR("%s: startPreview() frontcamera err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        usleep(10*1000);
+
+        for (k = 0; k < MM_QCAMERA_APP_INTERATION ; k++) {
+          CDBG_ERROR("DUAL open back camera %d \n",k);
+          if(mm_app_open(back_camera) != MM_CAMERA_OK) {
+                  CDBG_ERROR("%s:mm_app_open() back camera err=%d\n",__func__, rc);
+                  rc = -1;
+                  goto end;
+          }
+
+          if(system_dimension_set(back_camera) != MM_CAMERA_OK){
+                  CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                  rc = -1;
+                  goto end;
+          }
+
+          CDBG_ERROR("DUAL start camera Preview for back \n");
+          if( MM_CAMERA_OK != (rc = startPreview(back_camera))) {
+                 CDBG_ERROR("%s: back camera startPreview() err=%d\n", __func__, rc);
+                  goto end;
+          }
+
+          for (j = 0; j < MM_QCAMERA_APP_INTERATION; j++) {
+              CDBG_ERROR("DUAL take picture for back \n");
+              if ( MM_CAMERA_OK != (rc = takePicture_yuv(back_camera))) {
+                  CDBG_ERROR("%s: TakePicture() err=%d\n", __func__, rc);
+                  break;
+              }
+              mm_camera_app_wait();
+
+          }
+          usleep(10*1000);
+          CDBG_ERROR("DUAL stop camera Preview for back \n");
+          if( MM_CAMERA_OK != (rc = stopPreview(back_camera))) {
+                  CDBG_ERROR("%s: stopPreview() backcamera err=%d\n", __func__, rc);
+                  goto end;
+          }
+          usleep(10*1000);
+
+          CDBG_ERROR("DUAL close back camera\n");
+          if( mm_app_close(back_camera) != MM_CAMERA_OK) {
+                  CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+                  rc = -1;
+                  goto end;
+          }
+          usleep(20*1000);
+        }
+        CDBG_ERROR("DUAL stop camera Preview for Rdi \n");
+        if( MM_CAMERA_OK != (rc = stopRdi(front_camera))) {
+                CDBG("%s: stopRdi() err=%d\n", __func__, rc);
+                goto end;
+        }
+        usleep(10*1000);
+        CDBG_ERROR("DUAL close front camera \n");
+        if( mm_app_close(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        CDBG_ERROR("DUAL end \n");
+
+end:
+        if(rc == 0) {
+                printf("\nPassed\n");
+        }else{
+                printf("\nFailed\n");
+        }
+        CDBG("%s:END, rc = %d\n", __func__, rc);
+        return rc;
+}
+
+/*  Test case 2413ab5768 : 1357 * 3, This is performed three times
+* And for each iteration ab is performed thrice */
+
+int mm_app_dtc_4(mm_camera_app_t *cam_apps)
+{
+        int rc = MM_CAMERA_OK;
+        int i,j,k;
+        int result = 0;
+        int front_camera = 1;
+        int back_camera = 0;
+
+        printf("\n Verifying Preview on back Camera and RDI on Front camera 4...\n");
+        CDBG_ERROR("DUAL open front camera \n");
+        if(mm_app_open(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() front camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        if(system_dimension_set(front_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL start camera Preview for front \n");
+
+        if( MM_CAMERA_OK != (rc = startRdi(front_camera))) {
+                CDBG_ERROR("%s: startPreview() frontcamera err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        usleep(20*1000);
+
+        for (k = 0; k < MM_QCAMERA_APP_INTERATION ; k++){
+          CDBG_ERROR("DUAL open back camera %d \n",k);
+          if(mm_app_open(back_camera) != MM_CAMERA_OK) {
+                 CDBG_ERROR("%s:mm_app_open() back camera err=%d\n",__func__, rc);
+                 rc = -1;
+                 goto end;
+          }
+
+          if(system_dimension_set(back_camera) != MM_CAMERA_OK){
+                 CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                 rc = -1;
+                 goto end;
+          }
+
+          CDBG_ERROR("DUAL start camera Preview for back \n");
+          if( MM_CAMERA_OK != (rc = startPreview(back_camera))) {
+                CDBG_ERROR("%s: back camera startPreview() err=%d\n", __func__, rc);
+                 goto end;
+          }
+          usleep(30*1000);
+
+          for (j = 0; j < MM_QCAMERA_APP_INTERATION; j++) {
+             CDBG_ERROR("DUAL start camera record for back \n");
+             if ( MM_CAMERA_OK != (rc = startRecording(back_camera))) {
+                 CDBG_ERROR("%s: StartVideorecording() err=%d\n", __func__, rc);
+                 break;
+             }
+
+             mm_camera_app_wait();
+             usleep(15*1000);
+             CDBG_ERROR("DUAL stop camera record for back \n");
+             if ( MM_CAMERA_OK != (rc = stopRecording(back_camera))) {
+                 CDBG_ERROR("%s: Stopvideorecording() err=%d\n", __func__, rc);
+                 break;
+             }
+          }
+          usleep(10*1000);
+
+          CDBG_ERROR("DUAL stop camera Preview for back \n");
+          if( MM_CAMERA_OK != (rc = stopPreview(back_camera))) {
+                 CDBG_ERROR("%s: stopPreview() backcamera err=%d\n", __func__, rc);
+                 goto end;
+          }
+          usleep(10*1000);
+
+          CDBG_ERROR("DUAL close back camera\n");
+          if( mm_app_close(back_camera) != MM_CAMERA_OK) {
+                 CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+                 rc = -1;
+                 goto end;
+          }
+          usleep(20*1000);
+        }
+        CDBG_ERROR("DUAL stop camera Preview for Rdi \n");
+        if( MM_CAMERA_OK != (rc = stopRdi(front_camera))) {
+                CDBG("%s: stopRdi() err=%d\n", __func__, rc);
+                goto end;
+        }
+        usleep(10*1000);
+        CDBG_ERROR("DUAL close front camera \n");
+        if( mm_app_close(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        CDBG_ERROR("DUAL end \n");
+
+end:
+        if(rc == 0) {
+                printf("\nPassed\n");
+        }else{
+                printf("\nFailed\n");
+        }
+        CDBG("%s:END, rc = %d\n", __func__, rc);
+        return rc;
+}
+
+/*  Test case 24135768 : 1357 * 3, This is performed three times*/
+
+int mm_app_dtc_5(mm_camera_app_t *cam_apps)
+{
+        int rc = MM_CAMERA_OK;
+        int i,j,k;
+        int result = 0;
+        int front_camera = 1;
+        int back_camera = 0;
+
+        printf("\n Verifying Preview on back Camera and RDI on Front camera 5...\n");
+        CDBG_ERROR("DUAL open front camera \n");
+        if(mm_app_open(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() front camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        if(system_dimension_set(front_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL start camera Preview for front \n");
+
+        if( MM_CAMERA_OK != (rc = startRdi(front_camera))) {
+                CDBG_ERROR("%s: startPreview() frontcamera err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        sleep(1);
+
+        for (k = 0; k < 4 ; k++) {
+          CDBG_ERROR("DUAL open back camera %d \n",k);
+          if(mm_app_open(back_camera) != MM_CAMERA_OK) {
+                  CDBG_ERROR("%s:mm_app_open() back camera err=%d\n",__func__, rc);
+                  rc = -1;
+                  goto end;
+          }
+
+          if(system_dimension_set(back_camera) != MM_CAMERA_OK){
+                  CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                  rc = -1;
+                  goto end;
+          }
+
+          CDBG_ERROR("DUAL start camera Preview for back \n");
+          if( MM_CAMERA_OK != (rc = startPreview(back_camera))) {
+                 CDBG_ERROR("%s: back camera startPreview() err=%d\n", __func__, rc);
+                  goto end;
+          }
+          mm_camera_app_wait();
+          sleep(1);
+
+          CDBG_ERROR("DUAL stop camera Preview for back \n");
+          if( MM_CAMERA_OK != (rc = stopPreview(back_camera))) {
+                  CDBG_ERROR("%s: stopPreview() backcamera err=%d\n", __func__, rc);
+                  goto end;
+          }
+          usleep(10*1000);
+
+          CDBG_ERROR("DUAL close back camera\n");
+          if( mm_app_close(back_camera) != MM_CAMERA_OK) {
+                  CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+                  rc = -1;
+                  goto end;
+          }
+          sleep(1);
+        }
+        CDBG_ERROR("DUAL stop camera Preview for Rdi \n");
+        if( MM_CAMERA_OK != (rc = stopRdi(front_camera))) {
+                CDBG("%s: stopRdi() err=%d\n", __func__, rc);
+                goto end;
+        }
+        usleep(10*1000);
+        CDBG_ERROR("DUAL close front camera \n");
+        if( mm_app_close(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        CDBG_ERROR("DUAL end \n");
+
+end:
+        if(rc == 0) {
+                printf("\nPassed\n");
+        }else{
+                printf("\nFailed\n");
+        }
+        CDBG("%s:END, rc = %d\n", __func__, rc);
+        return rc;
+}
+
+/*  Test case 13246857 : 2468 * 3, This is performed three times*/
+
+int mm_app_dtc_6(mm_camera_app_t *cam_apps)
+{
+        int rc = MM_CAMERA_OK;
+        int i,j,k;
+        int result = 0;
+        int front_camera = 1;
+        int back_camera = 0;
+
+        printf("\n Verifying Preview on back Camera and RDI on Front camera 6...\n");
+        CDBG_ERROR("DUAL open back camera \n");
+        if(mm_app_open(back_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() back camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        if(system_dimension_set(back_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL start camera Preview for back \n");
+
+        if( MM_CAMERA_OK != (rc = startPreview(back_camera))) {
+                CDBG_ERROR("%s: startPreview() backcamera err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        sleep(1);
+
+        for (k = 0; k < 4 ; k++) {
+        CDBG_ERROR("DUAL open front camera %d \n",k);
+        if(mm_app_open(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() front camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        if(system_dimension_set(front_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL start camera Rdi for front \n");
+        if( MM_CAMERA_OK != (rc = startRdi(front_camera))) {
+                CDBG_ERROR("%s: startRdi() front camera err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        sleep(1);
+
+        CDBG_ERROR("DUAL stop camera Preview for front \n");
+        if( MM_CAMERA_OK != (rc = stopRdi(front_camera))) {
+                CDBG_ERROR("%s: startPreview() backcamera err=%d\n", __func__, rc);
+                goto end;
+        }
+        usleep(10*1000);
+
+        CDBG_ERROR("DUAL close front camera\n");
+        if( mm_app_close(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        sleep(1);
+        }
+        CDBG_ERROR("DUAL stop camera Preview for back \n");
+        if( MM_CAMERA_OK != (rc = stopPreview(back_camera))) {
+                CDBG("%s: stopPreview() err=%d\n", __func__, rc);
+                goto end;
+        }
+        usleep(10*1000);
+        CDBG_ERROR("DUAL close back camera \n");
+        if( mm_app_close(back_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        CDBG_ERROR("DUAL end \n");
+
+end:
+        if(rc == 0) {
+                printf("\nPassed\n");
+        }else{
+                printf("\nFailed\n");
+        }
+        CDBG("%s:END, rc = %d\n", __func__, rc);
+        return rc;
+}
+
+/*Multi Threaded Test Cases*/
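+
+/** front_thread
+ *    @data: pointer to struct test_case_params for this thread
+ *
+ *  pthread worker that repeatedly opens the front camera, starts RDI
+ *  streaming, optionally captures RDI snapshots, then stops RDI and
+ *  closes the camera, as driven by the test case parameters
+ **/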
+static void *front_thread(void *data)
+{
+        int front_camera = 1;
+        int rc = MM_CAMERA_OK;
+        int i,j,k,m;
+        struct test_case_params params
+          = *((struct test_case_params *)data);
+        for (i = 0; i < params.launch; i++) {
+          CDBG_ERROR("DUAL open front camera %d\n",i);
+          if(mm_app_open(front_camera) != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_open() front camera err=%d\n",__func__, rc);
+            rc = -1;
+            goto end;
+          }
+
+          if(system_dimension_set(front_camera) != MM_CAMERA_OK){
+            CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+            rc = -1;
+            goto end;
+          }
+
+          for (j = 0; j < params.preview; j++) {
+            CDBG_ERROR("DUAL start camera Rdi for front %d ,%d \n",i,j);
+            if( MM_CAMERA_OK != (rc = startRdi(front_camera))) {
+              CDBG_ERROR("%s: startRdi() front camera err=%d\n", __func__, rc);
+              goto end;
+            }
+            mm_camera_app_wait();
+            usleep(20*1000);
+            for (k = 0; k < params.snapshot; k++) {
+              CDBG_ERROR("DUAL take picture for front %d,%d,%d \n",i,j,k);
+              if ( MM_CAMERA_OK != (rc = takePicture_rdi(front_camera))) {
+                CDBG_ERROR("%s: TakePicture() err=%d\n", __func__, rc);
+                goto end;
+              }
+              mm_camera_app_wait();
+              usleep(30*1000);
+            }
+            CDBG_ERROR("DUAL stop camera Rdi for front %d,%d\n",i,j);
+            if( MM_CAMERA_OK != (rc = stopRdi(front_camera))) {
+              CDBG_ERROR("%s: stopRdi() front camera err=%d\n", __func__, rc);
+              goto end;
+            }
+            usleep(10*1000);
+          }
+
+          CDBG_ERROR("DUAL close front camera %d\n",i);
+          if( mm_app_close(front_camera) != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+            rc = -1;
+            goto end;
+          }
+        }
+end:
+        CDBG_ERROR("DUAL front thread close %d",rc);
+        return NULL;
+}
+
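+/** back_thread
+ *    @data: pointer to struct test_case_params for this thread
+ *
+ *  pthread worker that repeatedly opens the back camera, starts preview,
+ *  optionally captures YUV snapshots and records video, then stops
+ *  preview and closes the camera, as driven by the test case parameters
+ **/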
+static void *back_thread(void *data)
+{
+        int rc = MM_CAMERA_OK;
+        int back_camera = 0;
+        int i,j,k,m;
+        struct test_case_params params
+          = *((struct test_case_params *)data);
+        for (i = 0; i < params.launch; i++) {
+          CDBG_ERROR("DUAL open back camera %d\n",i);
+          if(mm_app_open(back_camera) != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_open() back camera err=%d\n",__func__, rc);
+            rc = -1;
+            goto end;
+          }
+          if(system_dimension_set(back_camera) != MM_CAMERA_OK){
+            CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+            rc = -1;
+            goto end;
+          }
+
+          for (j = 0; j < params.preview; j++) {
+            CDBG_ERROR("DUAL start camera Preview for back %d, %d\n",i,j);
+            if( MM_CAMERA_OK != (rc = startPreview(back_camera))) {
+              CDBG_ERROR("%s: startPreview() backcamera err=%d\n", __func__, rc);
+              goto end;
+            }
+            mm_camera_app_wait();
+            usleep(20*1000);
+            for (k = 0; k < params.snapshot; k++) {
+              CDBG_ERROR("DUAL take picture for back %d, %d, %d\n",i,j,k);
+              if ( MM_CAMERA_OK != (rc = takePicture_yuv(back_camera))) {
+                CDBG_ERROR("%s: TakePicture() err=%d\n", __func__, rc);
+                goto end;
+              }
+              mm_camera_app_wait();
+              usleep(30*1000);
+            }
+
+            for (m = 0; m < params.recording; m++) {
+              CDBG_ERROR("DUAL start record for back %d, %d, %d\n",i,j,m);
+              if ( MM_CAMERA_OK != (rc = startRecording(back_camera))) {
+                CDBG_ERROR("%s: StartVideorecording() err=%d\n", __func__, rc);
+                break;
+              }
+
+              mm_camera_app_wait();
+              usleep(10*1000);
+              CDBG_ERROR("DUAL stop camera record for back \n");
+              if ( MM_CAMERA_OK != (rc = stopRecording(back_camera))) {
+                CDBG_ERROR("%s: Stopvideorecording() err=%d\n", __func__, rc);
+                break;
+              }
+              usleep(10*1000);
+            }
+            CDBG_ERROR("DUAL stop camera Preview for back %d, %d\n",i,j);
+            if( MM_CAMERA_OK != (rc = stopPreview(back_camera))) {
+              CDBG("%s: stopPreview() err=%d\n", __func__, rc);
+              goto end;
+            }
+            usleep(10*1000);
+          }
+
+          CDBG_ERROR("DUAL close back camera %d\n",i);
+          if( mm_app_close(back_camera) != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+            rc = -1;
+            goto end;
+          }
+        }
+end:
+        CDBG_ERROR("DUAL back thread close %d",rc);
+        return NULL;
+}
+
+/*  Test case m13572468 : Open & start  in 2 concurrent pthread*/
+int mm_app_dtc_7(mm_camera_app_t *cam_apps)
+{
+        int rc = MM_CAMERA_OK;
+        int result = 0;
+        pthread_t back_thread_id, front_thread_id;
+        struct test_case_params params;
+        memset(&params, 0, sizeof(struct test_case_params));
+        params.launch = 5;
+        params.preview = 5;
+        printf("\n Verifying Preview on back Camera and RDI on Front camera 7...\n");
+
+        CDBG_ERROR("start back DUAL ");
+        rc = pthread_create(&back_thread_id, NULL, back_thread, &params);
+        CDBG_ERROR("start front DUAL ");
+        rc = pthread_create(&front_thread_id, NULL, front_thread, &params);
+        sleep(1);
+        CDBG_ERROR("stop back DUAL ");
+        rc = pthread_join(back_thread_id, NULL);
+        CDBG_ERROR("stop front DUAL ");
+        rc = pthread_join(front_thread_id, NULL);
+        CDBG_ERROR("DUAL end \n");
+
+end:
+        if(rc == 0) {
+          printf("\nPassed\n");
+        }else{
+          printf("\nFailed\n");
+        }
+        CDBG("%s:END, rc = %d\n", __func__, rc);
+        return rc;
+}
+
+/*  Test case m139572468 : Open & start in 2 concurrent pthread*/
+int mm_app_dtc_8(mm_camera_app_t *cam_apps)
+{
+        int rc = MM_CAMERA_OK;
+        int result = 0;
+
+        pthread_t back_thread_id, front_thread_id;
+        struct test_case_params bparams, fparams;
+        memset(&bparams, 0, sizeof(struct test_case_params));
+        memset(&fparams, 0, sizeof(struct test_case_params));
+        bparams.launch = 5;
+        bparams.preview = 5;
+        bparams.snapshot= 5;
+        fparams.launch = 5;
+        fparams.preview = 5;
+        printf("\n Verifying Preview on back Camera and RDI on Front camera 8...\n");
+
+        CDBG_ERROR("start back DUAL ");
+        rc = pthread_create(&back_thread_id, NULL, back_thread, &bparams);
+        CDBG_ERROR("start front DUAL ");
+        rc = pthread_create(&front_thread_id, NULL, front_thread, &fparams);
+        sleep(1);
+        CDBG_ERROR("stop back DUAL ");
+        rc = pthread_join(back_thread_id, NULL);
+        CDBG_ERROR("stop front DUAL ");
+        rc = pthread_join(front_thread_id, NULL);
+        CDBG_ERROR("DUAL end \n");
+
+end:
+        if(rc == 0)
+          printf("\nPassed\n");
+        else
+          printf("\nFailed\n");
+        CDBG("%s:END, rc = %d\n", __func__, rc);
+        return rc;
+}
+
+/*  Test case m1395724c68 : Open & start in 2 concurrent pthread*/
+int mm_app_dtc_9(mm_camera_app_t *cam_apps)
+{
+        int rc = MM_CAMERA_OK;
+        int result = 0;
+
+        pthread_t back_thread_id, front_thread_id;
+        struct test_case_params bparams, fparams;
+        memset(&bparams, 0, sizeof(struct test_case_params));
+        memset(&fparams, 0, sizeof(struct test_case_params));
+        bparams.launch = 5;
+        bparams.preview = 5;
+        bparams.snapshot= 5;
+        fparams.launch = 5;
+        fparams.preview = 5;
+        fparams.snapshot = 5;
+        printf("\n Verifying Preview on back Camera and RDI on Front camera 9...\n");
+
+        CDBG_ERROR("start back DUAL ");
+        rc = pthread_create(&back_thread_id, NULL, back_thread, &bparams);
+        CDBG_ERROR("start front DUAL ");
+        rc = pthread_create(&front_thread_id, NULL, front_thread, &fparams);
+        sleep(1);
+        CDBG_ERROR("stop back DUAL ");
+        rc = pthread_join(back_thread_id, NULL);
+        CDBG_ERROR("stop front DUAL ");
+        rc = pthread_join(front_thread_id, NULL);
+        CDBG_ERROR("DUAL end \n");
+
+end:
+        if(rc == 0) {
+          printf("\nPassed\n");
+        }else{
+          printf("\nFailed\n");
+        }
+        CDBG("%s:END, rc = %d\n", __func__, rc);
+        return rc;
+}
+
+/*  Test case m13ab572468 : Open & start in 2 concurrent pthread*/
+int mm_app_dtc_10(mm_camera_app_t *cam_apps)
+{
+        int rc = MM_CAMERA_OK;
+        int result = 0;
+
+        pthread_t back_thread_id, front_thread_id;
+        struct test_case_params bparams, fparams;
+        memset(&bparams, 0, sizeof(struct test_case_params));
+        memset(&fparams, 0, sizeof(struct test_case_params));
+        bparams.launch = 5;
+        bparams.preview = 5;
+        bparams.recording= 5;
+        fparams.launch = 5;
+        fparams.preview = 5;
+        printf("\n Verifying Preview on back Camera and RDI on Front camera 10...\n");
+
+        CDBG_ERROR("start back DUAL ");
+        rc = pthread_create(&back_thread_id, NULL, back_thread, &bparams);
+        CDBG_ERROR("start front DUAL ");
+        rc = pthread_create(&front_thread_id, NULL, front_thread, &fparams);
+        sleep(1);
+        CDBG_ERROR("stop back DUAL ");
+        rc = pthread_join(back_thread_id, NULL);
+        CDBG_ERROR("stop front DUAL ");
+        rc = pthread_join(front_thread_id, NULL);
+        CDBG_ERROR("DUAL end \n");
+end:
+        if(rc == 0) {
+          printf("\nPassed\n");
+        }else{
+          printf("\nFailed\n");
+        }
+        CDBG("%s:END, rc = %d\n", __func__, rc);
+        return rc;
+}
+
+/*  Test case m13ab5724c68 : Open & start in 2 concurrent pthread*/
+int mm_app_dtc_11(mm_camera_app_t *cam_apps)
+{
+        int rc = MM_CAMERA_OK;
+        int result = 0;
+
+        pthread_t back_thread_id, front_thread_id;
+        struct test_case_params bparams, fparams;
+        memset(&bparams, 0, sizeof(struct test_case_params));
+        memset(&fparams, 0, sizeof(struct test_case_params));
+        bparams.launch = 5;
+        bparams.preview = 5;
+        bparams.recording= 5;
+        fparams.launch = 5;
+        fparams.preview = 5;
+        fparams.snapshot = 5;
+        printf("\n Verifying Preview on back Camera and RDI on Front camera 11...\n");
+
+        CDBG_ERROR("start back DUAL ");
+        rc = pthread_create(&back_thread_id, NULL, back_thread, &bparams);
+        CDBG_ERROR("start front DUAL ");
+        rc = pthread_create(&front_thread_id, NULL, front_thread, &fparams);
+        sleep(1);
+        CDBG_ERROR("stop back DUAL ");
+        rc = pthread_join(back_thread_id, NULL);
+        CDBG_ERROR("stop front DUAL ");
+        rc = pthread_join(front_thread_id, NULL);
+        CDBG_ERROR("DUAL end \n");
+
+end:
+        if(rc == 0) {
+                printf("\nPassed\n");
+        }else{
+                printf("\nFailed\n");
+        }
+        CDBG("%s:END, rc = %d\n", __func__, rc);
+        return rc;
+}
+
+/*  Test case m1728 : Open & start in 2 concurrent pthreads */
+int mm_app_dtc_12(mm_camera_app_t *cam_apps)
+{
+        int rc = MM_CAMERA_OK;
+        int result = 0;
+
+        pthread_t back_thread_id, front_thread_id;
+        struct test_case_params bparams, fparams;
+        memset(&bparams, 0, sizeof(struct test_case_params));
+        memset(&fparams, 0, sizeof(struct test_case_params));
+        bparams.launch = 15;
+        fparams.launch = 15;
+        printf("\n Verifying concurrent launch on back and front cameras (test 12)...\n");
+
+        CDBG_ERROR("start back DUAL ");
+        rc = pthread_create(&back_thread_id, NULL, back_thread, &bparams);
+        CDBG_ERROR("start front DUAL ");
+        rc = pthread_create(&front_thread_id, NULL, front_thread, &fparams);
+        sleep(1);
+        CDBG_ERROR("stop back DUAL ");
+        rc = pthread_join(back_thread_id, NULL);
+        CDBG_ERROR("stop front DUAL ");
+        rc = pthread_join(front_thread_id, NULL);
+        CDBG_ERROR("DUAL end \n");
+
+end:
+        if(rc == 0) {
+                printf("\nPassed\n");
+        }else{
+                printf("\nFailed\n");
+        }
+        CDBG("%s:END, rc = %d\n", __func__, rc);
+        return rc;
+}
+
+/*  Test case 2413(ab)5768
+ *  Test the dual camera usecase. We call startPreview on the front camera,
+ *  but the backend will allocate RDI buffers and start the front camera in
+ *  RDI streaming mode. It then diverts the RDI frames, converts them into
+ *  YUV 420 through C2D and generates preview data in the buffers allocated here.
+ *  The back camera will use the pixel interface as usual.
+ */
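+/*  Roughly, the data paths exercised here are (a sketch, inferred from the
+ *  description above; the exact backend blocks are not visible in this test):
+ *
+ *    front sensor --> RDI (raw dump) --> C2D colour convert --> YUV 4:2:0
+ *                     preview buffers allocated by this test
+ *    back sensor  --> pixel interface (ISP) --> preview / video as usual
+ */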
+
+int mm_app_dtc_13(mm_camera_app_t *cam_apps)
+{
+        int rc = MM_CAMERA_OK;
+        int i,j,k;
+        int result = 0;
+        int front_camera = 1;
+        int back_camera = 0;
+
+        printf("\n 13. Verifying Preview + Recording on back Camera and Preview(through RDI) on Front camera\n");
+        CDBG_ERROR("DUAL open front camera \n");
+        if(mm_app_open(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() front camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        if(system_dimension_set(front_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL start camera Preview for front \n");
+        if( MM_CAMERA_OK != (rc = startPreview(front_camera))) {
+               CDBG_ERROR("%s: front camera startPreview() err=%d\n", __func__, rc);
+               goto end;
+        }
+        mm_camera_app_wait();
+        usleep(20*1000);
+
+        for (k = 0; k < MM_QCAMERA_APP_INTERATION ; k++){
+          CDBG_ERROR("DUAL open back camera %d \n",k);
+          if(mm_app_open(back_camera) != MM_CAMERA_OK) {
+                 CDBG_ERROR("%s:mm_app_open() back camera err=%d\n",__func__, rc);
+                 rc = -1;
+                 goto end;
+          }
+
+          if(system_dimension_set(back_camera) != MM_CAMERA_OK){
+                 CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                 rc = -1;
+                 goto end;
+          }
+
+          CDBG_ERROR("DUAL start camera Preview for back \n");
+          if( MM_CAMERA_OK != (rc = startPreview(back_camera))) {
+                CDBG_ERROR("%s: back camera startPreview() err=%d\n", __func__, rc);
+                 goto end;
+          }
+          usleep(30*1000);
+
+          for (j = 0; j < MM_QCAMERA_APP_INTERATION; j++) {
+             CDBG_ERROR("DUAL start camera record for back Iteration %d \n", j);
+             if ( MM_CAMERA_OK != (rc = startRecording(back_camera))) {
+                 CDBG_ERROR("%s: StartVideorecording() err=%d\n", __func__, rc);
+                 break;
+             }
+
+             mm_camera_app_wait();
+             usleep(10*1000*1000);
+             CDBG_ERROR("DUAL stop camera record for back Iteration %d\n", j);
+             if ( MM_CAMERA_OK != (rc = stopRecording(back_camera))) {
+                 CDBG_ERROR("%s: Stopvideorecording() err=%d\n", __func__, rc);
+                 break;
+             }
+          }
+          usleep(10*1000);
+
+          CDBG_ERROR("DUAL stop camera Preview for back \n");
+          if( MM_CAMERA_OK != (rc = stopPreview(back_camera))) {
+                 CDBG_ERROR("%s: stopPreview() backcamera err=%d\n", __func__, rc);
+                 goto end;
+          }
+          usleep(10*1000);
+
+          CDBG_ERROR("DUAL close back camera\n");
+          if( mm_app_close(back_camera) != MM_CAMERA_OK) {
+                 CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+                 rc = -1;
+                 goto end;
+          }
+          usleep(20*1000);
+        }
+        CDBG_ERROR("DUAL stop camera Preview for Rdi \n");
+        if( MM_CAMERA_OK != (rc = stopPreview(front_camera))) {
+                CDBG_ERROR("%s: stopPreview() frontcamera err=%d\n", __func__, rc);
+                goto end;
+        }
+        usleep(10*1000);
+        CDBG_ERROR("DUAL close front camera \n");
+        if( mm_app_close(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        CDBG_ERROR("DUAL end \n");
+
+end:
+        if(rc == 0) {
+                printf("\nPassed\n");
+        }else{
+                printf("\nFailed\n");
+        }
+        CDBG("%s:END, rc = %d\n", __func__, rc);
+        return rc;
+}
+
+/*Below 6  are reference test cases just to test the open path for dual camera*/
+int mm_app_dtc_1243(mm_camera_app_t *cam_apps)
+{
+        int rc = MM_CAMERA_OK;
+        int i,j;
+        int result = 0;
+        int front_camera = 1;
+        int back_camera = 0;
+
+        printf("\n Verifying Preview on back Camera and RDI on Front camera...\n");
+        CDBG_ERROR("DUAL open back camera \n");
+        if(mm_app_open(back_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() back camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        if(system_dimension_set(back_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL open front camera \n");
+        if(mm_app_open(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() front camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        if(system_dimension_set(front_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL start camera Rdi for front \n");
+        if( MM_CAMERA_OK != (rc = startRdi(front_camera))) {
+                CDBG_ERROR("%s: front camera startRdi() err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        sleep(1);
+
+        CDBG_ERROR("DUAL start camera Preview for back \n");
+
+        if( MM_CAMERA_OK != (rc = startPreview(back_camera))) {
+                CDBG_ERROR("%s: startPreview() backcamera err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        sleep(1);
+        CDBG_ERROR("DUAL end \n");
+
+end:
+        if(rc == 0) {
+                printf("\nPassed\n");
+        }else{
+                printf("\nFailed\n");
+        }
+        CDBG("%s:END, rc = %d\n", __func__, rc);
+        return rc;
+}
+
+int mm_app_dtc_2134(mm_camera_app_t *cam_apps)
+{
+        int rc = MM_CAMERA_OK;
+        int i,j;
+        int result = 0;
+        int front_camera = 1;
+        int back_camera = 0;
+
+        printf("\n Verifying Preview on back Camera and RDI on Front camera...\n");
+        CDBG_ERROR("DUAL open front camera \n");
+        if(mm_app_open(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() front camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        if(system_dimension_set(front_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL open back camera \n");
+        if(mm_app_open(back_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() back camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        if(system_dimension_set(back_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL start camera Preview for back \n");
+        if( MM_CAMERA_OK != (rc = startPreview(back_camera))) {
+               CDBG_ERROR("%s: back camera startPreview() err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        sleep(1);
+
+        CDBG_ERROR("DUAL start camera Rdi for front \n");
+
+        if( MM_CAMERA_OK != (rc = startRdi(front_camera))) {
+                CDBG_ERROR("%s: startRdi() front camera err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        sleep(1);
+        CDBG_ERROR("DUAL end \n");
+
+end:
+        if(rc == 0) {
+                printf("\nPassed\n");
+        }else{
+                printf("\nFailed\n");
+        }
+        CDBG("%s:END, rc = %d\n", __func__, rc);
+        return rc;
+}
+int mm_app_dtc_2143(mm_camera_app_t *cam_apps)
+{
+        int rc = MM_CAMERA_OK;
+        int i,j;
+        int result = 0;
+        int front_camera = 1;
+        int back_camera = 0;
+
+        printf("\n Verifying Preview on back Camera and RDI on Front camera...\n");
+        CDBG_ERROR("DUAL open front camera \n");
+        if(mm_app_open(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() front camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        if(system_dimension_set(front_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL open back camera \n");
+        if(mm_app_open(back_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() back camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        if(system_dimension_set(back_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL start camera rdi for front \n");
+        if( MM_CAMERA_OK != (rc = startRdi(front_camera))) {
+                CDBG_ERROR("%s: front camera startRdi() err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        sleep(1);
+
+        CDBG_ERROR("DUAL start camera preview for back \n");
+
+        if( MM_CAMERA_OK != (rc = startPreview(back_camera))) {
+                CDBG_ERROR("%s: startPreview() backcamera err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        sleep(1);
+        CDBG_ERROR("DUAL end \n");
+
+end:
+        if(rc == 0) {
+                printf("\nPassed\n");
+        }else{
+                printf("\nFailed\n");
+        }
+        CDBG("%s:END, rc = %d\n", __func__, rc);
+        return rc;
+}
+
+int mm_app_dtc_2413(mm_camera_app_t *cam_apps)
+{
+        int rc = MM_CAMERA_OK;
+        int i,j;
+        int result = 0;
+        int front_camera = 1;
+        int back_camera = 0;
+
+        printf("\n Verifying Preview on back Camera and RDI on Front camera...\n");
+        CDBG_ERROR("DUAL open front camera \n");
+        if(mm_app_open(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() front camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        if(system_dimension_set(front_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL start camera rdi for front \n");
+        if( MM_CAMERA_OK != (rc = startRdi(front_camera))) {
+                CDBG_ERROR("%s: front camera startRdi() err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        sleep(1);
+
+        CDBG_ERROR("DUAL open back camera \n");
+        if(mm_app_open(back_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() back camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        if(system_dimension_set(back_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL start camera preview for back \n");
+
+        if( MM_CAMERA_OK != (rc = startPreview(back_camera))) {
+                CDBG_ERROR("%s: startPreview() backcamera err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        sleep(1);
+        CDBG_ERROR("DUAL end \n");
+
+end:
+        if(rc == 0) {
+                printf("\nPassed\n");
+        }else{
+                printf("\nFailed\n");
+        }
+        CDBG("%s:END, rc = %d\n", __func__, rc);
+        return rc;
+}
+
+int mm_app_dtc_1234(mm_camera_app_t *cam_apps)
+{
+        int rc = MM_CAMERA_OK;
+        int i,j;
+        int result = 0;
+        int front_camera = 1;
+        int back_camera = 0;
+
+        printf("\n Verifying Preview on back Camera and RDI on Front camera...\n");
+        CDBG_ERROR("DUAL open back camera \n");
+        if(mm_app_open(back_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() back camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        if(system_dimension_set(back_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        CDBG_ERROR("DUAL open front camera \n");
+        if(mm_app_open(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() front camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        if(system_dimension_set(front_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+        CDBG_ERROR("DUAL start camera preview for back \n");
+        if( MM_CAMERA_OK != (rc = startPreview(back_camera))) {
+               CDBG_ERROR("%s: back camera startPreview() err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        sleep(1);
+
+        CDBG_ERROR("DUAL start camera rdi for front \n");
+
+        if( MM_CAMERA_OK != (rc = startRdi(front_camera))) {
+                CDBG_ERROR("%s: startRdi() front camera err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        sleep(1);
+        CDBG_ERROR("DUAL end \n");
+
+end:
+        if(rc == 0) {
+                printf("\nPassed\n");
+        }else{
+                printf("\nFailed\n");
+        }
+        CDBG("%s:END, rc = %d\n", __func__, rc);
+        return rc;
+}
+
+int mm_app_dtc_1324(mm_camera_app_t *cam_apps)
+{
+        int rc = MM_CAMERA_OK;
+        int i,j;
+        int result = 0;
+        int front_camera = 1;
+        int back_camera = 0;
+
+        printf("\n Verifying Preview on back Camera and RDI on Front camera...\n");
+        CDBG_ERROR("DUAL start back camera \n");
+        if(mm_app_open(back_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() back camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        if(system_dimension_set(back_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        CDBG_ERROR("DUAL start camera preview for back \n");
+        if( MM_CAMERA_OK != (rc = startPreview(back_camera))) {
+                CDBG_ERROR("%s: back camera startPreview() err=%d\n", __func__, rc);
+                goto end;
+        }
+        //mm_camera_app_wait();
+        sleep(1);
+        CDBG_ERROR("DUAL start front camera \n");
+        if(mm_app_open(front_camera) != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_open() front camera err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+
+       if(system_dimension_set(front_camera) != MM_CAMERA_OK){
+                CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+                rc = -1;
+                goto end;
+        }
+        CDBG_ERROR("DUAL start rdi preview \n");
+
+        if( MM_CAMERA_OK != (rc = startRdi(front_camera))) {
+                CDBG_ERROR("%s: startRdi() front camera err=%d\n", __func__, rc);
+                goto end;
+        }
+        mm_camera_app_wait();
+        sleep(1);
+        CDBG_ERROR("DUAL end \n");
+
+end:
+        if(rc == 0) {
+          printf("\nPassed\n");
+        }else{
+          printf("\nFailed\n");
+        }
+        CDBG("%s:END, rc = %d\n", __func__, rc);
+        return rc;
+}
+
+/* single camera test cases*/
+int mm_app_dtc_s_0(mm_camera_app_t *cam_apps)
+{
+    int rc = MM_CAMERA_OK;
+    int i,j;
+    int result = 0;
+    int front_camera = 1;
+    int back_camera = 0;
+
+    printf("\n Verifying Preview on back Camera and RDI on Front camera...\n");
+
+    if(mm_app_open(back_camera) != MM_CAMERA_OK) {
+        CDBG_ERROR("%s:mm_app_open() back camera err=%d\n",__func__, rc);
+        rc = -1;
+        goto end;
+    }
+    if(system_dimension_set(back_camera) != MM_CAMERA_OK){
+        CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+        rc = -1;
+        goto end;
+    }
+
+    if( MM_CAMERA_OK != (rc = startPreview(back_camera))) {
+        CDBG_ERROR("%s: back camera startPreview() err=%d\n", __func__, rc);
+        goto end;
+    }
+
+    mm_camera_app_wait();
+    if(mm_app_open(front_camera) != MM_CAMERA_OK) {
+        CDBG_ERROR("%s:mm_app_open() front camera err=%d\n",__func__, rc);
+        rc = -1;
+        goto end;
+    }
+    if(system_dimension_set(front_camera) != MM_CAMERA_OK){
+        CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+        rc = -1;
+        goto end;
+    }
+
+    if( MM_CAMERA_OK != (rc = startRdi(front_camera))) {
+        CDBG_ERROR("%s: startRdi() front camera err=%d\n", __func__, rc);
+        goto end;
+    }
+    mm_camera_app_wait();
+
+    if( MM_CAMERA_OK != (rc = stopRdi(front_camera))) {
+        CDBG_ERROR("%s: stopRdi() front camera err=%d\n", __func__, rc);
+        goto end;
+    }
+
+    if( MM_CAMERA_OK != (rc = stopPreview(my_cam_app.cam_open))) {
+        CDBG("%s: stopPreview() err=%d\n", __func__, rc);
+        goto end;
+    }
+
+    if( mm_app_close(my_cam_app.cam_open) != MM_CAMERA_OK) {
+        CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+        rc = -1;
+        goto end;
+    }
+end:
+    if(rc == 0) {
+        printf("\nPassed\n");
+    }else{
+        printf("\nFailed\n");
+    }
+    CDBG("%s:END, rc = %d\n", __func__, rc);
+    return rc;
+}
+
+int mm_app_dtc_s_1(mm_camera_app_t *cam_apps)
+{
+    int rc = MM_CAMERA_OK;
+    int i,j;
+    int result = 0;
+
+    printf("\n Verifying Snapshot on front and back camera...\n");
+    for(i = 0; i < cam_apps->num_cameras; i++) {
+        if( mm_app_open(i) != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_open() err=%d\n",__func__, rc);
+            rc = -1;
+            goto end;
+        }
+        if(system_dimension_set(my_cam_app.cam_open) != MM_CAMERA_OK){
+            CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+            rc = -1;
+            goto end;
+        }
+
+        if( MM_CAMERA_OK != (rc = startPreview(my_cam_app.cam_open))) {
+                CDBG_ERROR("%s: startPreview() err=%d\n", __func__, rc);
+                break;
+        }
+        for(j = 0; j < MM_QCAMERA_APP_INTERATION; j++) {
+            if( MM_CAMERA_OK != (rc = takePicture_yuv(my_cam_app.cam_open))) {
+                CDBG_ERROR("%s: TakePicture() err=%d\n", __func__, rc);
+                break;
+            }
+            /*if(mm_camera_app_timedwait() == ETIMEDOUT) {
+                CDBG_ERROR("%s: Snapshot/Preview Callback not received in time or qbuf failed\n", __func__);
+                break;
+            }*/
+            mm_camera_app_wait();
+            result++;
+        }
+        if( MM_CAMERA_OK != (rc = stopPreview(my_cam_app.cam_open))) {
+            CDBG("%s: stopPreview() err=%d\n", __func__, rc);
+            break;
+        }
+        if( mm_app_close(my_cam_app.cam_open) != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+            rc = -1;
+            goto end;
+        }
+        if(result != MM_QCAMERA_APP_INTERATION) {
+            printf("%s: Snapshot Start/Stop fails for Camera %d in iteration %d\n", __func__, i, j);
+            rc = -1;
+            break;
+        }
+
+        result = 0;
+    }
+end:
+    if(rc == 0) {
+        printf("\t***Passed***\n");
+    }else{
+        printf("\t***Failed***\n");
+    }
+    CDBG("%s:END, rc = %d\n", __func__, rc);
+    return rc;
+}
+
+int mm_app_dtc_s_2(mm_camera_app_t *cam_apps)
+{
+    int rc = MM_CAMERA_OK;
+    int i,j;
+    int result = 0;
+
+    printf("\n Verifying Video on front and back camera...\n");
+    for(i = 0; i < cam_apps->num_cameras; i++) {
+        if( mm_app_open(i) != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_open() err=%d\n",__func__, rc);
+            rc = -1;
+            goto end;
+        }
+        if(system_dimension_set(my_cam_app.cam_open) != MM_CAMERA_OK){
+            CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+            rc = -1;
+            goto end;
+        }
+
+        if( MM_CAMERA_OK != (rc = startPreview(my_cam_app.cam_open))) {
+            CDBG_ERROR("%s: startPreview() err=%d\n", __func__, rc);
+            break;
+        }
+        for(j = 0; j < MM_QCAMERA_APP_INTERATION; j++) {
+            if( MM_CAMERA_OK != (rc = startRecording(my_cam_app.cam_open))) {
+                CDBG_ERROR("%s: StartVideorecording() err=%d\n", __func__, rc);
+                break;
+            }
+
+            /*if(mm_camera_app_timedwait() == ETIMEDOUT) {
+            CDBG_ERROR("%s: Video Callback not received in time\n", __func__);
+            break;
+            }*/
+            mm_camera_app_wait();
+            if( MM_CAMERA_OK != (rc = stopRecording(my_cam_app.cam_open))) {
+                CDBG_ERROR("%s: Stopvideorecording() err=%d\n", __func__, rc);
+                break;
+            }
+            result++;
+        }
+        if( MM_CAMERA_OK != (rc = stopPreview(my_cam_app.cam_open))) {
+            CDBG("%s: stopPreview() err=%d\n", __func__, rc);
+            break;
+        }
+        if( mm_app_close(my_cam_app.cam_open) != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+            rc = -1;
+            goto end;
+        }
+        if(result != MM_QCAMERA_APP_INTERATION) {
+            printf("%s: Video Start/Stop fails for Camera %d in iteration %d\n", __func__, i, j);
+            rc = -1;
+            break;
+        }
+
+        result = 0;
+    }
+end:
+    if(rc == 0) {
+        printf("\nPassed\n");
+    }else{
+        printf("\nFailed\n");
+    }
+    CDBG("%s:END, rc = %d\n", __func__, rc);
+    return rc;
+}
+
+int mm_app_dtc_s_3(mm_camera_app_t *cam_apps)
+{
+    int rc = MM_CAMERA_OK;
+    int i,j;
+    int result = 0;
+
+    printf("\n Verifying RDI Stream on front and back camera...\n");
+    if(cam_apps->num_cameras == 0) {
+        CDBG_ERROR("%s:Query Failed: Num of cameras = %d\n",__func__, cam_apps->num_cameras);
+        rc = -1;
+        goto end;
+    }
+    for(i = 0; i < cam_apps->num_cameras; i++) {
+        if( mm_app_open(i) != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_open() err=%d\n",__func__, rc);
+            rc = -1;
+            goto end;
+        }
+        if(system_dimension_set(my_cam_app.cam_open) != MM_CAMERA_OK){
+            CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+            rc = -1;
+            goto end;
+        }
+        for(j = 0; j < MM_QCAMERA_APP_INTERATION; j++) {
+            if( MM_CAMERA_OK != (rc = startRdi(my_cam_app.cam_open))) {
+                CDBG_ERROR("%s: startRdi() err=%d\n", __func__, rc);
+                break;
+            }
+
+            /*if(mm_camera_app_timedwait() == ETIMEDOUT) {
+            CDBG_ERROR("%s: Video Callback not received in time\n", __func__);
+            break;
+            }*/
+            mm_camera_app_wait();
+            if( MM_CAMERA_OK != (rc = stopRdi(my_cam_app.cam_open))) {
+                CDBG_ERROR("%s: stopRdi() err=%d\n", __func__, rc);
+                break;
+            }
+            result++;
+        }
+        if( mm_app_close(my_cam_app.cam_open) != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+            rc = -1;
+            goto end;
+        }
+        if(result != MM_QCAMERA_APP_INTERATION) {
+            printf("%s: RDI Start/Stop fails for Camera %d in iteration %d\n", __func__, i, j);
+            rc = -1;
+            break;
+        }
+
+        result = 0;
+    }
+end:
+    if(rc == 0) {
+        printf("\nPassed\n");
+    }else{
+        printf("\nFailed\n");
+    }
+    CDBG("%s:END, rc = %d\n", __func__, rc);
+    return rc;
+}
+
+/*Stats Test Case*/
+int mm_app_dtc_s_5(mm_camera_app_t *cam_apps)
+{
+    int rc = MM_CAMERA_OK;
+    int i,j;
+    int result = 0;
+    int front_camera = 1;
+    int back_camera = 0;
+
+    printf("\n Verifying Stats stream on back camera...\n");
+
+    if(mm_app_open(back_camera) != MM_CAMERA_OK) {
+        CDBG_ERROR("%s:mm_app_open() back camera err=%d\n",__func__, rc);
+        rc = -1;
+        goto end;
+    }
+    if(system_dimension_set(back_camera) != MM_CAMERA_OK){
+        CDBG_ERROR("%s:system_dimension_set() err=%d\n",__func__, rc);
+        rc = -1;
+        goto end;
+    }
+
+    if( MM_CAMERA_OK != (rc = startStats(back_camera))) {
+        CDBG_ERROR("%s: back camera startStats() err=%d\n", __func__, rc);
+        goto end;
+    }
+
+    mm_camera_app_wait();
+
+    if( MM_CAMERA_OK != (rc = stopStats(my_cam_app.cam_open))) {
+        CDBG("%s: stopStats() err=%d\n", __func__, rc);
+        goto end;
+    }
+
+    if( mm_app_close(my_cam_app.cam_open) != MM_CAMERA_OK) {
+        CDBG_ERROR("%s:mm_app_close() err=%d\n",__func__, rc);
+        rc = -1;
+        goto end;
+    }
+end:
+    if(rc == 0) {
+        printf("\nPassed\n");
+    }else{
+        printf("\nFailed\n");
+    }
+    CDBG("%s:END, rc = %d\n", __func__, rc);
+    return rc;
+}
+
+int mm_app_gen_dual_test_cases()
+{
+    int tc = 0;
+    memset(mm_app_tc, 0, sizeof(mm_app_tc));
+    if(tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_dtc_0;
+    if(tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_dtc_1;
+    if(tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_dtc_2;
+    if(tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_dtc_3;
+    if(tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_dtc_4;
+    if(tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_dtc_5;
+    if(tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_dtc_6;
+    if(tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_dtc_7;
+    if(tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_dtc_8;
+    if(tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_dtc_9;
+    if(tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_dtc_10;
+    if(tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_dtc_11;
+    if(tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_dtc_12;
+    if(tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_dtc_13;
+
+    return tc;
+}
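+/* Note: only mm_app_dtc_0 .. mm_app_dtc_13 are registered in the table above.
+ * The reference open-path cases (mm_app_dtc_1243, _2134, _2143, _2413, _1234,
+ * _1324) and the single-camera cases (mm_app_dtc_s_*) defined in this file are
+ * not wired in here and are presumably invoked manually when needed. */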
+
+int mm_app_dual_test_entry(mm_camera_app_t *cam_app)
+{
+    int rc = MM_CAMERA_OK;
+    int i, tc = 0;
+    int cam_id = 0;
+
+    tc = mm_app_gen_dual_test_cases();
+    CDBG("Running %d test cases\n",tc);
+    for(i = 0; i < tc; i++) {
+        mm_app_tc[i].r = mm_app_tc[i].f(cam_app);
+        if(mm_app_tc[i].r != MM_CAMERA_OK) {
+            printf("%s: test case %d error = %d, abort unit testing engine!!!!\n",
+                    __func__, i, mm_app_tc[i].r);
+            rc = mm_app_tc[i].r;
+            goto end;
+        }
+    }
+end:
+    printf("\nTOTAL_TEST_CASE = %d, NUM_TEST_RAN = %d, rc=%d\n", tc, i, rc);
+    return rc;
+}
+
+
+
+
diff --git a/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_main_menu.c b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_main_menu.c
new file mode 100644
index 0000000..d4d785d
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_main_menu.c
@@ -0,0 +1,2061 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <inttypes.h>
+
+#include "mm_qcamera_main_menu.h"
+#include "mm_qcamera_app.h"
+#include "mm_qcamera_dbg.h"
+#include "mm_qcamera_socket.h"
+
+/*===========================================================================
+ * Macro
+ *===========================================================================*/
+#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
+#define VIDEO_BUFFER_SIZE       (PREVIEW_WIDTH * PREVIEW_HEIGHT * 3/2)
+#define THUMBNAIL_BUFFER_SIZE   (THUMBNAIL_WIDTH * THUMBNAIL_HEIGHT * 3/2)
+#define SNAPSHOT_BUFFER_SIZE    (PICTURE_WIDTH * PICTURE_HEIGHT * 3/2)
+//TODO: check these macros against the current app.
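+/* The 3/2 factor above assumes a planar/semi-planar YUV 4:2:0 layout: one
+ * full-size luma plane plus chroma subsampled by 2 in both directions, i.e.
+ * width * height * (1 + 1/4 + 1/4) = width * height * 3/2 bytes per frame. */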
+
+/*===========================================================================
+ * Defines
+ *===========================================================================*/
+//#define VIDEO_FRAMES_NUM      4
+#define THUMBNAIL_FRAMES_NUM  1
+#define SNAPSHOT_FRAMES_NUM   1
+#define MAX_NUM_FORMAT        32
+#define ZOOM_STEP             2
+#define ZOOM_MIN_VALUE        0
+#define EXPOSURE_COMPENSATION_MAXIMUM_NUMERATOR 12
+#define EXPOSURE_COMPENSATION_MINIMUM_NUMERATOR -12
+#define EXPOSURE_COMPENSATION_DEFAULT_NUMERATOR 0
+#define EXPOSURE_COMPENSATION_DENOMINATOR 6
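+/* With the numerator range [-12, 12] and denominator 6 above, exposure
+ * compensation presumably spans -2 EV .. +2 EV in steps of 1/6 EV;
+ * e.g. a numerator of +9 would correspond to +1.5 EV. */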
+
+//TODO: find correct values of Contrast defines.
+#define CAMERA_MIN_CONTRAST    0
+#define CAMERA_DEF_CONTRAST    5
+#define CAMERA_MAX_CONTRAST    10
+#define CAMERA_CONTRAST_STEP   1
+
+//TODO: find correct values of Brightness defines.
+#define CAMERA_MIN_BRIGHTNESS  0
+#define CAMERA_DEF_BRIGHTNESS  3
+#define CAMERA_MAX_BRIGHTNESS  6
+#define CAMERA_BRIGHTNESS_STEP 1
+
+//TODO: find correct values of Saturation defines.
+#define CAMERA_MIN_SATURATION  0
+#define CAMERA_DEF_SATURATION  5
+#define CAMERA_MAX_SATURATION  10
+#define CAMERA_SATURATION_STEP 1
+
+#define CAMERA_MIN_SHARPNESS 0
+#define CAMERA_MAX_SHARPNESS 10
+#define CAMERA_DEF_SHARPNESS 5
+#define CAMERA_SHARPNESS_STEP 1
+
+const CAMERA_MAIN_MENU_TBL_T camera_main_menu_tbl[] = {
+  {START_PREVIEW,               "Start preview"},
+  {STOP_PREVIEW,               "Stop preview/video"},
+  {SET_WHITE_BALANCE,          "Set white balance mode"},
+  {SET_TINTLESS_ENABLE,        "Set Tintless Enable"},
+  {SET_TINTLESS_DISABLE,       "Set Tintless Disable"},
+  {SET_EXP_METERING,           "Set exposure metering mode"},
+  {GET_CTRL_VALUE,             "Get control value menu"},
+  {TOGGLE_AFR,                 "Toggle auto frame rate. Default fixed frame rate"},
+  {SET_ISO,                    "ISO changes."},
+  {BRIGHTNESS_GOTO_SUBMENU,    "Brightness changes."},
+  {CONTRAST_GOTO_SUBMENU,      "Contrast changes."},
+  {EV_GOTO_SUBMENU,            "EV changes."},
+  {SATURATION_GOTO_SUBMENU,    "Saturation changes."},
+  {SET_ZOOM,                   "Set Digital Zoom."},
+  {SET_SHARPNESS,              "Set Sharpness."},
+  {TAKE_JPEG_SNAPSHOT,         "Take a snapshot"},
+  {START_RECORDING,            "Start RECORDING"},
+  {STOP_RECORDING,             "Stop RECORDING"},
+  {BEST_SHOT,                  "Set best-shot mode"},
+  {LIVE_SHOT,                  "Take a live snapshot"},
+  {FLASH_MODES,                "Set Flash modes"},
+  {TOGGLE_ZSL,                 "Toggle ZSL On/Off"},
+  {TAKE_RAW_SNAPSHOT,          "Take RAW snapshot"},
+  {SWITCH_SNAP_RESOLUTION,     "Select Jpeg resolution"},
+  {TOGGLE_WNR,                 "Toggle Wavelet Denoise"},
+  {EXIT,                       "Exit"}
+};
+
+CAMERA_SENSOR_MENU_TLB_T sensor_tbl[] = {
+        {"Primary Camera",      0},
+        {"Secondary Camera",    0},
+        {"Camera Sensor 3",     0},
+        {"Camera Sensor 4",     0}
+};
+
+const CAMERA_BRIGHTNESS_TBL_T brightness_change_tbl[] = {
+  {INC_BRIGHTNESS, "Increase Brightness by one step."},
+  {DEC_BRIGHTNESS, "Decrease Brightness by one step."},
+};
+
+const CAMERA_CONTRST_TBL_T contrast_change_tbl[] = {
+  {INC_CONTRAST, "Increase Contrast by one step."},
+  {DEC_CONTRAST, "Decrease Contrast by one step."},
+};
+
+const CAMERA_EV_TBL_T camera_EV_tbl[] = {
+  {INCREASE_EV, "Increase EV by one step."},
+  {DECREASE_EV, "Decrease EV by one step."},
+};
+
+const CAMERA_SATURATION_TBL_T camera_saturation_tbl[] = {
+  {INC_SATURATION, "Increase Saturation by one step."},
+  {DEC_SATURATION, "Decrease Saturation by one step."},
+};
+
+const CAMERA_SHARPNESS_TBL_T camera_sharpness_tbl[] = {
+  {INC_SHARPNESS, "Increase Sharpness."},
+  {DEC_SHARPNESS, "Decrease Sharpness."},
+};
+
+const WHITE_BALANCE_TBL_T white_balance_tbl[] = {
+  {   WB_AUTO,               "White Balance - Auto"},
+  {   WB_INCANDESCENT,       "White Balance - Incandescent"},
+  {   WB_FLUORESCENT,        "White Balance - Fluorescent"},
+  {   WB_WARM_FLUORESCENT,   "White Balance - Warm Fluorescent"},
+  {   WB_DAYLIGHT,           "White Balance - Daylight"},
+  {   WB_CLOUDY_DAYLIGHT,    "White Balance - Cloudy Daylight"},
+  {   WB_TWILIGHT,           "White Balance - Twilight"},
+  {   WB_SHADE,              "White Balance - Shade"},
+};
+
+const GET_CTRL_TBL_T get_ctrl_tbl[] = {
+  {     WHITE_BALANCE_STATE,            "Get white balance state (auto/off)"},
+  {     WHITE_BALANCE_TEMPERATURE,      "Get white balance temperature"},
+  {     BRIGHTNESS_CTRL,                "Get brightness value"},
+  {     EV,                             "Get exposure value"},
+  {     CONTRAST_CTRL,                  "Get contrast value"},
+  {     SATURATION_CTRL,                "Get saturation value"},
+  {     SHARPNESS_CTRL,                 "Get sharpness value"},
+};
+
+const EXP_METERING_TBL_T exp_metering_tbl[] = {
+  {   AUTO_EXP_FRAME_AVG,          "Exposure Metering - Frame Average"},
+  {   AUTO_EXP_CENTER_WEIGHTED,    "Exposure Metering - Center Weighted"},
+  {   AUTO_EXP_SPOT_METERING,      "Exposure Metering - Spot Metering"},
+  {   AUTO_EXP_SMART_METERING,     "Exposure Metering - Smart Metering"},
+  {   AUTO_EXP_USER_METERING,      "Exposure Metering - User Metering"},
+  {   AUTO_EXP_SPOT_METERING_ADV,  "Exposure Metering - Spot Metering Adv"},
+  {   AUTO_EXP_CENTER_WEIGHTED_ADV,"Exposure Metering - Center Weighted Adv"},
+};
+
+const ISO_TBL_T iso_tbl[] = {
+  {   ISO_AUTO,   "ISO: Auto"},
+  {   ISO_DEBLUR, "ISO: Deblur"},
+  {   ISO_100,    "ISO: 100"},
+  {   ISO_200,    "ISO: 200"},
+  {   ISO_400,    "ISO: 400"},
+  {   ISO_800,    "ISO: 800"},
+  {   ISO_1600,   "ISO: 1600"},
+};
+
+const ZOOM_TBL_T zoom_tbl[] = {
+  {   ZOOM_IN,  "Zoom In one step"},
+  {   ZOOM_OUT, "Zoom Out one step"},
+};
+
+const BESTSHOT_MODE_TBT_T bestshot_mode_tbl[] = {
+  {BESTSHOT_AUTO,           "Bestshot Mode: Auto"},
+  {BESTSHOT_ACTION,         "Bestshot Mode: Action"},
+  {BESTSHOT_PORTRAIT,       "Bestshot Mode: Portrait"},
+  {BESTSHOT_LANDSCAPE,      "Bestshot Mode: Landscape"},
+  {BESTSHOT_NIGHT,          "Bestshot Mode: Night"},
+  {BESTSHOT_NIGHT_PORTRAIT, "Bestshot Mode: Night Portrait"},
+  {BESTSHOT_THEATRE,        "Bestshot Mode: Theatre"},
+  {BESTSHOT_BEACH,          "Bestshot Mode: Beach"},
+  {BESTSHOT_SNOW,           "Bestshot Mode: Snow"},
+  {BESTSHOT_SUNSET,         "Bestshot Mode: Sunset"},
+  {BESTSHOT_ANTISHAKE,      "Bestshot Mode: Antishake"},
+  {BESTSHOT_FIREWORKS,      "Bestshot Mode: Fireworks"},
+  {BESTSHOT_SPORTS,         "Bestshot Mode: Sports"},
+  {BESTSHOT_PARTY,          "Bestshot Mode: Party"},
+  {BESTSHOT_CANDLELIGHT,    "Bestshot Mode: Candlelight"},
+  {BESTSHOT_ASD,            "Bestshot Mode: ASD"},
+  {BESTSHOT_BACKLIGHT,      "Bestshot Mode: Backlight"},
+  {BESTSHOT_FLOWERS,        "Bestshot Mode: Flowers"},
+  {BESTSHOT_AR,             "Bestshot Mode: Augmented Reality"},
+  {BESTSHOT_HDR,            "Bestshot Mode: HDR"},
+};
+
+const FLASH_MODE_TBL_T flashmodes_tbl[] = {
+  {   FLASH_MODE_OFF,   "Flash Mode Off"},
+  {   FLASH_MODE_AUTO,  "Flash Mode Auto"},
+  {   FLASH_MODE_ON,    "Flash Mode On"},
+  {   FLASH_MODE_TORCH, "Flash Mode Torch"},
+};
+
+DIMENSION_TBL_T dimension_tbl[] = {
+{VGA_WIDTH,      VGA_HEIGHT,      "VGA",   "Size: VGA <640x480>",    0},
+{MP1_WIDTH,      MP1_HEIGHT,      "1MP",   "Size: 1MP <1280x960>",   0},
+{MP5_WIDTH,      MP5_HEIGHT,      "5MP",   "Size: 5MP <2592x1944>",  0},
+{MP8_WIDTH,      MP8_HEIGHT,      "8MP",   "Size: 8MP <3264x2448>",  0},
+{MP12_WIDTH,     MP12_HEIGHT,     "12MP",  "Size: 12MP <4000x3000>", 0},
+};
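+/* The trailing 0 in each row above is the 'supported' flag; it is presumably
+ * filled in at runtime from the sensor capabilities. Both
+ * camera_resolution_change_tbl() and the MENU_ID_SWITCH_RES handler later in
+ * this file only offer/accept entries whose flag is set. */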
+
+/*===========================================================================
+ * Forward declarations
+ *===========================================================================*/
+//static void system_dimension_set(mm_camera_test_obj_t *test_obj);
+/*===========================================================================
+ * Static global variables
+ *===========================================================================*/
+USER_INPUT_DISPLAY_T input_display;
+int preview_video_resolution_flag = 0;
+
+//TODO: default values.
+#if 1
+int brightness = CAMERA_DEF_BRIGHTNESS;
+int contrast = CAMERA_DEF_CONTRAST;
+int saturation = CAMERA_DEF_SATURATION;
+int sharpness = CAMERA_DEF_SHARPNESS;
+#else
+int brightness = 0;
+int contrast = 0;
+int saturation = 0;
+int sharpness = 0;
+#endif
+//TODO: find new method to calculate ev.
+//int32_t ev_numerator = EXPOSURE_COMPENSATION_DEFAULT_NUMERATOR;
+
+//TODO:
+//fps_mode_t fps_mode = FPS_MODE_FIXED;
+int zoom_level;
+int zoom_max_value;
+int cam_id;
+int is_rec = 0;
+
+
+static int submain();
+
+/*===========================================================================
+ * FUNCTION    - keypress_to_event -
+ *
+ * DESCRIPTION:
+ *==========================================================================*/
+int keypress_to_event(char keypress)
+{
+  int out_buf = INVALID_KEY_PRESS;
+  if ((keypress >= 'A' && keypress <= 'Z') ||
+    (keypress >= 'a' && keypress <= 'z')) {
+    out_buf = tolower(keypress);
+    out_buf = out_buf - 'a';
+  } else if (keypress >= '0' && keypress <= '9') {
+    out_buf = keypress - '0';
+  }
+  return out_buf;
+}
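+/* Examples of the mapping above: 'a'/'A' -> 0, 'b'/'B' -> 1, ... 'z' -> 25,
+ * and '0'..'9' -> 0..9; any other key yields INVALID_KEY_PRESS. The resulting
+ * index is what next_menu() switches on for the current menu. */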
+
+int next_menu(menu_id_change_t current_menu_id, char keypress, camera_action_t * action_id_ptr, int * action_param)
+{
+  int output_to_event;
+  menu_id_change_t next_menu_id = MENU_ID_INVALID;
+  * action_id_ptr = ACTION_NO_ACTION;
+
+  output_to_event = keypress_to_event(keypress);
+  CDBG("current_menu_id=%d\n",current_menu_id);
+  printf("output_to_event=%d\n",output_to_event);
+  switch(current_menu_id) {
+    case MENU_ID_MAIN:
+      switch(output_to_event) {
+        case START_PREVIEW:
+          * action_id_ptr = ACTION_START_PREVIEW;
+          CDBG("START_PREVIEW\n");
+          break;
+        case STOP_PREVIEW:
+          * action_id_ptr = ACTION_STOP_PREVIEW;
+          CDBG("STOP_PREVIEW\n");
+          break;
+
+        case SET_WHITE_BALANCE:
+          next_menu_id = MENU_ID_WHITEBALANCECHANGE;
+          CDBG("next_menu_id = MENU_ID_WHITEBALANCECHANGE = %d\n", next_menu_id);
+          break;
+
+        case SET_TINTLESS_ENABLE:
+          * action_id_ptr = ACTION_SET_TINTLESS_ENABLE;
+          next_menu_id = MENU_ID_MAIN;
+          CDBG("next_menu_id = MENU_ID_TINTLESSENABLE = %d\n", next_menu_id);
+          break;
+
+        case SET_TINTLESS_DISABLE:
+          * action_id_ptr = ACTION_SET_TINTLESS_DISABLE;
+          next_menu_id = MENU_ID_MAIN;
+          CDBG("next_menu_id = MENU_ID_TINTLESSDISABLE = %d\n", next_menu_id);
+          break;
+
+        case SET_EXP_METERING:
+          next_menu_id = MENU_ID_EXPMETERINGCHANGE;
+          CDBG("next_menu_id = MENU_ID_EXPMETERINGCHANGE = %d\n", next_menu_id);
+          break;
+
+        case GET_CTRL_VALUE:
+          next_menu_id = MENU_ID_GET_CTRL_VALUE;
+          CDBG("next_menu_id = MENU_ID_GET_CTRL_VALUE = %d\n", next_menu_id);
+          break;
+
+        case BRIGHTNESS_GOTO_SUBMENU:
+          next_menu_id = MENU_ID_BRIGHTNESSCHANGE;
+          CDBG("next_menu_id = MENU_ID_BRIGHTNESSCHANGE = %d\n", next_menu_id);
+          break;
+
+        case CONTRAST_GOTO_SUBMENU:
+          next_menu_id = MENU_ID_CONTRASTCHANGE;
+          break;
+
+        case EV_GOTO_SUBMENU:
+          next_menu_id = MENU_ID_EVCHANGE;
+          break;
+
+        case SATURATION_GOTO_SUBMENU:
+          next_menu_id = MENU_ID_SATURATIONCHANGE;
+          break;
+
+        case TOGGLE_AFR:
+          * action_id_ptr = ACTION_TOGGLE_AFR;
+          CDBG("next_menu_id = MENU_ID_TOGGLEAFR = %d\n", next_menu_id);
+          break;
+
+        case SET_ISO:
+          next_menu_id = MENU_ID_ISOCHANGE;
+          CDBG("next_menu_id = MENU_ID_ISOCHANGE = %d\n", next_menu_id);
+          break;
+
+        case SET_ZOOM:
+          next_menu_id = MENU_ID_ZOOMCHANGE;
+          CDBG("next_menu_id = MENU_ID_ZOOMCHANGE = %d\n", next_menu_id);
+          break;
+
+        case BEST_SHOT:
+          next_menu_id = MENU_ID_BESTSHOT;
+          CDBG("next_menu_id = MENU_ID_BESTSHOT = %d\n", next_menu_id);
+          break;
+
+        case LIVE_SHOT:
+          * action_id_ptr = ACTION_TAKE_LIVE_SNAPSHOT;
+          CDBG("\nTaking Live snapshot\n");
+          break;
+
+        case FLASH_MODES:
+          next_menu_id = MENU_ID_FLASHMODE;
+          CDBG("next_menu_id = MENU_ID_FLASHMODE = %d\n", next_menu_id);
+          break;
+
+        case SET_SHARPNESS:
+          next_menu_id = MENU_ID_SHARPNESSCHANGE;
+          CDBG("next_menu_id = MENU_ID_SHARPNESSCHANGE = %d\n", next_menu_id);
+          break;
+
+        case SWITCH_SNAP_RESOLUTION:
+          next_menu_id = MENU_ID_SWITCH_RES;
+          CDBG("next_menu_id = MENU_ID_SWITCH_RES = %d\n", next_menu_id);
+          break;
+
+        case TAKE_JPEG_SNAPSHOT:
+          * action_id_ptr = ACTION_TAKE_JPEG_SNAPSHOT;
+          printf("\n Taking JPEG snapshot\n");
+          break;
+
+        case START_RECORDING:
+          * action_id_ptr = ACTION_START_RECORDING;
+          CDBG("Start recording\n");
+          break;
+        case STOP_RECORDING:
+          * action_id_ptr = ACTION_STOP_RECORDING;
+          CDBG("Stop recording\n");
+          break;
+        case TOGGLE_ZSL:
+          * action_id_ptr = ACTION_TOGGLE_ZSL;
+          CDBG("Toggle ZSL\n");
+          break;
+        case TAKE_RAW_SNAPSHOT:
+            * action_id_ptr = ACTION_TAKE_RAW_SNAPSHOT;
+            next_menu_id = MENU_ID_MAIN;
+            CDBG("Capture RAW\n");
+            break;
+        case TOGGLE_WNR:
+            * action_id_ptr = ACTION_TOGGLE_WNR;
+            next_menu_id = MENU_ID_MAIN;
+            CDBG("Toggle WNR");
+            break;
+        case EXIT:
+          * action_id_ptr = ACTION_EXIT;
+          CDBG("Exit \n");
+          break;
+        default:
+          next_menu_id = MENU_ID_MAIN;
+          CDBG("next_menu_id = MENU_ID_MAIN = %d\n", next_menu_id);
+          break;
+      }
+      break;
+
+    case MENU_ID_SWITCH_RES:
+        printf("MENU_ID_SWITCH_RES\n");
+        *action_id_ptr = ACTION_SWITCH_RESOLUTION;
+        *action_param = output_to_event;
+        int available_sizes = sizeof(dimension_tbl)/sizeof(dimension_tbl[0]);
+        if ( ( *action_param >= 0 ) &&
+             ( *action_param < available_sizes ) &&
+             ( dimension_tbl[*action_param].supported )) {
+            next_menu_id = MENU_ID_MAIN;
+        }
+        else {
+          next_menu_id = current_menu_id;
+        }
+        break;
+
+    case MENU_ID_SENSORS:
+        next_menu_id = MENU_ID_MAIN;
+        *action_id_ptr = ACTION_SWITCH_CAMERA;
+        *action_param = output_to_event;
+        break;
+
+    case MENU_ID_WHITEBALANCECHANGE:
+      printf("MENU_ID_WHITEBALANCECHANGE\n");
+      if (output_to_event >= WB_MAX) {
+        next_menu_id = current_menu_id;
+        * action_id_ptr = ACTION_NO_ACTION;
+      } else {
+        next_menu_id = MENU_ID_MAIN;
+        * action_id_ptr = ACTION_SET_WHITE_BALANCE;
+        * action_param = output_to_event;
+      }
+      break;
+
+    case MENU_ID_EXPMETERINGCHANGE:
+      printf("MENU_ID_EXPMETERINGCHANGE\n");
+      if (output_to_event >= AUTO_EXP_MAX) {
+        next_menu_id = current_menu_id;
+        * action_id_ptr = ACTION_NO_ACTION;
+      } else {
+        next_menu_id = MENU_ID_MAIN;
+        * action_id_ptr = ACTION_SET_EXP_METERING;
+        * action_param = output_to_event;
+      }
+      break;
+
+    case MENU_ID_GET_CTRL_VALUE:
+      printf("MENU_ID_GET_CTRL_VALUE\n");
+      * action_id_ptr = ACTION_GET_CTRL_VALUE;
+      if (output_to_event > 0 &&
+        output_to_event <= (int)(sizeof(get_ctrl_tbl)/sizeof(get_ctrl_tbl[0]))) {
+          next_menu_id = MENU_ID_MAIN;
+          * action_param = output_to_event;
+      }
+      else {
+        next_menu_id = current_menu_id;
+      }
+      break;
+
+    case MENU_ID_BRIGHTNESSCHANGE:
+      switch (output_to_event) {
+        case INC_BRIGHTNESS:
+          * action_id_ptr = ACTION_BRIGHTNESS_INCREASE;
+          next_menu_id = MENU_ID_MAIN;
+          break;
+
+        case DEC_BRIGHTNESS:
+          * action_id_ptr = ACTION_BRIGHTNESS_DECREASE;
+          next_menu_id = MENU_ID_MAIN;
+          break;
+
+        default:
+          next_menu_id = MENU_ID_BRIGHTNESSCHANGE;
+          break;
+      }
+      break;
+
+    case MENU_ID_CONTRASTCHANGE:
+      switch (output_to_event) {
+        case INC_CONTRAST:
+          * action_id_ptr = ACTION_CONTRAST_INCREASE;
+          next_menu_id = MENU_ID_MAIN;
+          break;
+
+        case DEC_CONTRAST:
+          * action_id_ptr = ACTION_CONTRAST_DECREASE;
+          next_menu_id = MENU_ID_MAIN;
+          break;
+
+        default:
+          next_menu_id = MENU_ID_CONTRASTCHANGE;
+          break;
+      }
+      break;
+
+    case MENU_ID_EVCHANGE:
+      switch (output_to_event) {
+        case INCREASE_EV:
+          * action_id_ptr = ACTION_EV_INCREASE;
+          next_menu_id = MENU_ID_MAIN;
+          break;
+
+        case DECREASE_EV:
+          * action_id_ptr = ACTION_EV_DECREASE;
+          next_menu_id = MENU_ID_MAIN;
+          break;
+
+        default:
+          next_menu_id = MENU_ID_EVCHANGE;
+          break;
+      }
+      break;
+
+    case MENU_ID_SATURATIONCHANGE:
+      switch (output_to_event) {
+        case INC_SATURATION:
+          * action_id_ptr = ACTION_SATURATION_INCREASE;
+          next_menu_id = MENU_ID_MAIN;
+          break;
+
+        case DEC_SATURATION:
+          * action_id_ptr = ACTION_SATURATION_DECREASE;
+          next_menu_id = MENU_ID_MAIN;
+          break;
+
+        default:
+          next_menu_id = MENU_ID_SATURATIONCHANGE;
+          break;
+      }
+      break;
+
+    case MENU_ID_ISOCHANGE:
+      printf("MENU_ID_ISOCHANGE\n");
+      if (output_to_event >= ISO_MAX) {
+        next_menu_id = current_menu_id;
+        * action_id_ptr = ACTION_NO_ACTION;
+      } else {
+        next_menu_id = MENU_ID_MAIN;
+        * action_id_ptr = ACTION_SET_ISO;
+        * action_param = output_to_event;
+      }
+      break;
+
+    case MENU_ID_ZOOMCHANGE:
+      * action_id_ptr = ACTION_SET_ZOOM;
+      if (output_to_event > 0 &&
+        output_to_event <= (int)(sizeof(zoom_tbl)/sizeof(zoom_tbl[0]))) {
+          next_menu_id = MENU_ID_MAIN;
+          * action_param = output_to_event;
+      } else {
+        next_menu_id = current_menu_id;
+      }
+      break;
+
+    case MENU_ID_SHARPNESSCHANGE:
+      switch (output_to_event) {
+        case INC_SHARPNESS:
+          * action_id_ptr = ACTION_SHARPNESS_INCREASE;
+          next_menu_id = MENU_ID_MAIN;
+          break;
+        case DEC_SHARPNESS:
+          * action_id_ptr = ACTION_SHARPNESS_DECREASE;
+          next_menu_id = MENU_ID_MAIN;
+          break;
+        default:
+          next_menu_id = MENU_ID_SHARPNESSCHANGE;
+          break;
+      }
+      break;
+
+    case MENU_ID_BESTSHOT:
+      if (output_to_event >= BESTSHOT_MAX) {
+        next_menu_id = current_menu_id;
+        * action_id_ptr = ACTION_NO_ACTION;
+      } else {
+        next_menu_id = MENU_ID_MAIN;
+        * action_id_ptr = ACTION_SET_BESTSHOT_MODE;
+        * action_param = output_to_event;
+      }
+      break;
+
+    case MENU_ID_FLASHMODE:
+      if (output_to_event >= FLASH_MODE_MAX) {
+        next_menu_id = current_menu_id;
+        * action_id_ptr = ACTION_NO_ACTION;
+      } else {
+        next_menu_id = MENU_ID_MAIN;
+        * action_id_ptr = ACTION_SET_FLASH_MODE;
+        * action_param = output_to_event;
+      }
+      break;
+
+    default:
+      CDBG("menu id is wrong: %d\n", current_menu_id);
+      break;
+  }
+
+  return next_menu_id;
+}
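+/* A minimal usage sketch (the real driver loop lives in submain(), declared
+ * above but not shown here), assuming a menu_id and a keypress read from the
+ * user:
+ *
+ *   camera_action_t action = ACTION_NO_ACTION;
+ *   int action_param = 0;
+ *   menu_id = next_menu(menu_id, keypress, &action, &action_param);
+ *   // then dispatch on 'action', passing 'action_param' where relevant
+ */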
+
+/*===========================================================================
+ * FUNCTION    - print_menu_preview_video -
+ *
+ * DESCRIPTION:
+ * ===========================================================================*/
+static void print_menu_preview_video(void) {
+  unsigned int i;
+  if (!is_rec) {
+    printf("\n");
+    printf("===========================================\n");
+    printf("      Camera is in preview/video mode now        \n");
+    printf("===========================================\n\n");
+  } else {
+    printf("\n");
+    printf("===========================================\n");
+    printf("      Camera is in RECORDING mode now       \n");
+    printf("        Press 'Q' To Stop Recording          \n");
+    printf("        Press 'S' To Take Live Snapshot       \n");
+    printf("===========================================\n\n");
+  }
+  char menuNum = 'A';
+  for (i = 0; i < sizeof(camera_main_menu_tbl)/sizeof(camera_main_menu_tbl[0]); i++) {
+    if (i == BASE_OFFSET) {
+      menuNum = '1';
+    }
+
+    printf("%c.  %s\n", menuNum, camera_main_menu_tbl[i].menu_name);
+    menuNum++;
+  }
+
+  printf("\nPlease enter your choice: ");
+
+  return;
+}
+
+static void camera_preview_video_wb_change_tbl(void) {
+  unsigned int i;
+  printf("\n");
+  printf("==========================================================\n");
+  printf("      Camera is in white balance change mode       \n");
+  printf("==========================================================\n\n");
+
+  char submenuNum = 'A';
+  for (i = 0 ; i < sizeof(white_balance_tbl) /
+                   sizeof(white_balance_tbl[0]); i++) {
+        printf("%c.  %s\n", submenuNum, white_balance_tbl[i].wb_name);
+        submenuNum++;
+  }
+  printf("\nPlease enter your choice for White Balance modes: ");
+  return;
+}
+
+static void camera_preview_video_get_ctrl_value_tbl(void) {
+  unsigned int i;
+  printf("\n");
+  printf("==========================================================\n");
+  printf("      Camera is in get control value mode       \n");
+  printf("==========================================================\n\n");
+
+  char submenuNum = 'A';
+  for (i = 0 ; i < sizeof(get_ctrl_tbl) /
+                   sizeof(get_ctrl_tbl[0]); i++) {
+        printf("%c.  %s\n", submenuNum, get_ctrl_tbl[i].get_ctrl_name);
+        submenuNum++;
+  }
+  printf("\nPlease enter your choice for control value you want to get: ");
+  return;
+}
+
+static void camera_preview_video_exp_metering_change_tbl(void) {
+  unsigned int i;
+  printf("\n");
+  printf("==========================================================\n");
+  printf("      Camera is in exposure metering change mode       \n");
+  printf("==========================================================\n\n");
+
+  char submenuNum = 'A';
+  for (i = 0 ; i < sizeof(exp_metering_tbl) /
+                   sizeof(exp_metering_tbl[0]); i++) {
+        printf("%c.  %s\n", submenuNum, exp_metering_tbl[i].exp_metering_name);
+        submenuNum++;
+  }
+  printf("\nPlease enter your choice for exposure metering modes: ");
+  return;
+}
+
+static void camera_contrast_change_tbl(void) {
+    unsigned int i;
+
+    printf("\n");
+    printf("==========================================================\n");
+    printf("      Camera is in change contrast mode       \n");
+    printf("==========================================================\n\n");
+
+    char contrastmenuNum = 'A';
+    for (i = 0; i < sizeof(contrast_change_tbl) /
+                    sizeof(contrast_change_tbl[0]); i++) {
+        printf("%c.  %s\n", contrastmenuNum,
+                            contrast_change_tbl[i].contrast_name);
+        contrastmenuNum++;
+    }
+
+    printf("\nPlease enter your choice for contrast Change: ");
+    return;
+}
+
+static void camera_EV_change_tbl(void) {
+  unsigned int i;
+
+  printf("\n");
+  printf("===========================================\n");
+  printf("      Camera is in EV change mode now       \n");
+  printf("===========================================\n\n");
+
+  char submenuNum = 'A';
+  for (i = 0; i < sizeof(camera_EV_tbl)/sizeof(camera_EV_tbl[0]); i++) {
+    printf("%c.  %s\n", submenuNum, camera_EV_tbl[i].EV_name);
+    submenuNum++;
+  }
+
+  printf("\nPlease enter your choice for EV changes: ");
+  return;
+}
+
+static void camera_resolution_change_tbl(void) {
+    unsigned int i;
+
+    printf("\n");
+    printf("==========================================================\n");
+    printf("      Camera is in snapshot resolution mode               \n");
+    printf("==========================================================\n\n");
+
+    for (i = 0; i < sizeof(dimension_tbl) /
+      sizeof(dimension_tbl[0]); i++) {
+        if ( dimension_tbl[i].supported ) {
+            printf("%d.  %s\n", i,
+                    dimension_tbl[i].str_name);
+        }
+    }
+
+    printf("\nPlease enter your choice for Resolution: ");
+    return;
+}
+
+static void camera_preview_video_zoom_change_tbl(void) {
+    unsigned int i;
+    zoom_max_value = MAX_ZOOMS_CNT;
+    printf("\nCurrent Zoom Value = %d ,Max Zoom Value = %d\n",zoom_level,zoom_max_value);
+    char submenuNum = 'A';
+    for (i = 0 ; i < sizeof(zoom_tbl) /
+                   sizeof(zoom_tbl[0]); i++) {
+        printf("%c.  %s\n", submenuNum, zoom_tbl[i].zoom_direction_name);
+        submenuNum++;
+    }
+    printf("\nPlease enter your choice for zoom change direction: ");
+    return;
+}
+
+static void camera_brightness_change_tbl(void) {
+    unsigned int i;
+
+    printf("\n");
+    printf("==========================================================\n");
+    printf("      Camera is in change brightness mode       \n");
+    printf("==========================================================\n\n");
+
+    char brightnessmenuNum = 'A';
+    for (i = 0; i < sizeof(brightness_change_tbl) /
+                    sizeof(brightness_change_tbl[0]); i++) {
+        printf("%c.  %s\n", brightnessmenuNum,
+                            brightness_change_tbl[i].brightness_name);
+        brightnessmenuNum++;
+    }
+
+    printf("\nPlease enter your choice for Brightness Change: ");
+    return;
+}
+
+static void camera_saturation_change_tbl(void) {
+    unsigned int i;
+
+    printf("\n");
+    printf("==========================================================\n");
+    printf("      Camera is in change saturation mode       \n");
+    printf("==========================================================\n\n");
+
+    char saturationmenuNum = 'A';
+    for (i = 0; i < sizeof(camera_saturation_tbl) /
+                    sizeof(camera_saturation_tbl[0]); i++) {
+        printf("%c.  %s\n", saturationmenuNum,
+                            camera_saturation_tbl[i].saturation_name);
+        saturationmenuNum++;
+    }
+
+    printf("\nPlease enter your choice for Saturation Change: ");
+    return;
+}
+
+static void camera_preview_video_iso_change_tbl(void) {
+  unsigned int i;
+  printf("\n");
+  printf("==========================================================\n");
+  printf("      Camera is in ISO change mode       \n");
+  printf("==========================================================\n\n");
+
+  char submenuNum = 'A';
+  for (i = 0 ; i < sizeof(iso_tbl) /
+                   sizeof(iso_tbl[0]); i++) {
+        printf("%c.  %s\n", submenuNum, iso_tbl[i].iso_modes_name);
+        submenuNum++;
+  }
+  printf("\nPlease enter your choice for iso modes: ");
+  return;
+}
+
+static void camera_preview_video_sharpness_change_tbl(void) {
+  unsigned int i;
+  printf("\n");
+  printf("==========================================================\n");
+  printf("      Camera is in sharpness change mode       \n");
+  printf("==========================================================\n\n");
+
+  char submenuNum = 'A';
+  for (i = 0 ; i < sizeof(camera_sharpness_tbl) /
+                   sizeof(camera_sharpness_tbl[0]); i++) {
+        printf("%c.  %s\n", submenuNum, camera_sharpness_tbl[i].sharpness_name);
+        submenuNum++;
+  }
+  printf("\nPlease enter your choice for sharpness modes: ");
+  return;
+}
+
+static void camera_set_bestshot_tbl(void)
+{
+  unsigned int i;
+
+  printf("\n");
+  printf("===========================================\n");
+  printf("      Camera is in set besthot mode now       \n");
+  printf("===========================================\n\n");
+
+
+  char bsmenuNum = 'A';
+  for (i = 0; i < sizeof(bestshot_mode_tbl)/sizeof(bestshot_mode_tbl[0]); i++) {
+    printf("%c.  %s\n", bsmenuNum,
+      bestshot_mode_tbl[i].name);
+    bsmenuNum++;
+  }
+
+  printf("\nPlease enter your choice of Bestshot Mode: ");
+  return;
+}
+
+static void camera_set_flashmode_tbl(void)
+{
+  unsigned int i;
+
+  printf("\n");
+  printf("===========================================\n");
+  printf("      Camera is in set flash mode now       \n");
+  printf("===========================================\n\n");
+
+
+  char bsmenuNum = 'A';
+  for (i = 0; i < sizeof(flashmodes_tbl)/sizeof(flashmodes_tbl[0]); i++) {
+    printf("%c.  %s\n", bsmenuNum,
+      flashmodes_tbl[i].name);
+    bsmenuNum++;
+  }
+
+  printf("\nPlease enter your choice of Bestshot Mode: ");
+  return;
+}
+
+static void camera_sensors_tbl(void)
+{
+  unsigned int i;
+  size_t available_sensors = sizeof(sensor_tbl)/sizeof(sensor_tbl[0]);
+
+  printf("\n");
+  printf("===========================================\n");
+  printf("      Camera Sensor to be used:            \n");
+  printf("===========================================\n\n");
+
+
+  char bsmenuNum = 'A';
+  for (i = 0; ( i < available_sensors ) && ( sensor_tbl[i].present ) ; i++) {
+    printf("%c.  %s\n", bsmenuNum,
+            sensor_tbl[i].menu_name);
+    bsmenuNum++;
+  }
+
+  printf("\nPlease enter your choice for sensor: ");
+  return;
+}
+
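+/* The helpers below adjust a single camera parameter (contrast, brightness,
+ * EV, saturation, sharpness, ...) and, where implemented, push the new value
+ * to the camera backend through mm_camera_lib_send_command(). */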
+/*===========================================================================
+ * FUNCTION     - increase_contrast -
+ *
+ * DESCRIPTION: Steps contrast up by CAMERA_CONTRAST_STEP, clamps it to
+ *              CAMERA_MAX_CONTRAST and applies it via MM_CAMERA_LIB_CONTRAST.
+ * ===========================================================================*/
+int increase_contrast (mm_camera_lib_handle *lib_handle) {
+        contrast += CAMERA_CONTRAST_STEP;
+        if (contrast > CAMERA_MAX_CONTRAST) {
+                contrast = CAMERA_MAX_CONTRAST;
+                printf("Reached max CONTRAST. \n");
+        }
+        printf("Increase Contrast to %d\n", contrast);
+        return mm_camera_lib_send_command(lib_handle,
+                                          MM_CAMERA_LIB_CONTRAST,
+                                          &contrast,
+                                          NULL);
+}
+
+/*===========================================================================
+ * FUNCTION     - decrease_contrast -
+ *
+ * DESCRIPTION: Steps contrast down by CAMERA_CONTRAST_STEP, clamps it to
+ *              CAMERA_MIN_CONTRAST and applies it via MM_CAMERA_LIB_CONTRAST.
+ * ===========================================================================*/
+int decrease_contrast (mm_camera_lib_handle *lib_handle) {
+        contrast -= CAMERA_CONTRAST_STEP;
+        if (contrast < CAMERA_MIN_CONTRAST) {
+                contrast = CAMERA_MIN_CONTRAST;
+                printf("Reached min CONTRAST. \n");
+        }
+        printf("Decrease Contrast to %d\n", contrast);
+        return mm_camera_lib_send_command(lib_handle,
+                                          MM_CAMERA_LIB_CONTRAST,
+                                          &contrast,
+                                          NULL);
+}
+
+/*===========================================================================
+ * FUNCTION     - decrease_brightness -
+ *
+ * DESCRIPTION: Steps brightness down by CAMERA_BRIGHTNESS_STEP, clamps it to
+ *              CAMERA_MIN_BRIGHTNESS and applies it via MM_CAMERA_LIB_BRIGHTNESS.
+ * ===========================================================================*/
+int decrease_brightness (mm_camera_lib_handle *lib_handle) {
+        brightness -= CAMERA_BRIGHTNESS_STEP;
+        if (brightness < CAMERA_MIN_BRIGHTNESS) {
+                brightness = CAMERA_MIN_BRIGHTNESS;
+                printf("Reached min BRIGHTNESS. \n");
+        }
+        printf("Decrease Brightness to %d\n", brightness);
+        return mm_camera_lib_send_command(lib_handle,
+                                          MM_CAMERA_LIB_BRIGHTNESS,
+                                          &brightness,
+                                          NULL);
+}
+
+/*===========================================================================
+ * FUNCTION     - increase_brightness -
+ *
+ * DESCRIPTION: Steps brightness up by CAMERA_BRIGHTNESS_STEP, clamps it to
+ *              CAMERA_MAX_BRIGHTNESS and applies it via MM_CAMERA_LIB_BRIGHTNESS.
+ * ===========================================================================*/
+int increase_brightness (mm_camera_lib_handle *lib_handle) {
+        brightness += CAMERA_BRIGHTNESS_STEP;
+        if (brightness > CAMERA_MAX_BRIGHTNESS) {
+                brightness = CAMERA_MAX_BRIGHTNESS;
+                printf("Reached max BRIGHTNESS. \n");
+        }
+        printf("Increase Brightness to %d\n", brightness);
+        return mm_camera_lib_send_command(lib_handle,
+                                          MM_CAMERA_LIB_BRIGHTNESS,
+                                          &brightness,
+                                          NULL);
+}
+
+/*===========================================================================
+ * FUNCTION     - increase_EV -
+ *
+ * DESCRIPTION: Raises exposure compensation; the implementation is currently
+ *              compiled out (#if 0), so this is a no-op that returns 0.
+ * ===========================================================================*/
+
+int increase_EV (void) {
+#if 0
+   int rc = 0;
+   int32_t value = 0;
+   rc = cam_config_is_parm_supported(cam_id, MM_CAMERA_PARM_EXPOSURE_COMPENSATION);
+    if(!rc) {
+       printf("MM_CAMERA_PARM_EXPOSURE_COMPENSATION mode is not supported for this sensor");
+       return -1;
+    }
+    ev_numerator += 1;
+    if(ev_numerator >= EXPOSURE_COMPENSATION_MINIMUM_NUMERATOR &&
+            ev_numerator <= EXPOSURE_COMPENSATION_MAXIMUM_NUMERATOR){
+        int16_t  numerator16 = (int16_t)(ev_numerator & 0x0000ffff);
+        uint16_t denominator16 = EXPOSURE_COMPENSATION_DENOMINATOR;
+        value = numerator16 << 16 | denominator16;
+    } else {
+       printf("Reached max EV.\n");
+    }
+    return mm_app_set_config_parm(cam_id, MM_CAMERA_PARM_EXPOSURE_COMPENSATION, value);
+#endif
+  return 0;
+}
+
+/*===========================================================================
+ * FUNCTION     - decrease_EV -
+ *
+ * DESCRIPTION: Lowers exposure compensation; the implementation is currently
+ *              compiled out (#if 0), so this is a no-op that returns 0.
+ * ===========================================================================*/
+int decrease_EV (void) {
+#if 0
+   int rc = 0;
+   int32_t  value = 0;
+   rc = cam_config_is_parm_supported(cam_id, MM_CAMERA_PARM_EXPOSURE_COMPENSATION);
+    if(!rc) {
+       printf("MM_CAMERA_PARM_EXPOSURE_COMPENSATION mode is not supported for this sensor");
+       return -1;
+    }
+    ev_numerator -= 1;
+    if(ev_numerator >= EXPOSURE_COMPENSATION_MINIMUM_NUMERATOR &&
+            ev_numerator <= EXPOSURE_COMPENSATION_MAXIMUM_NUMERATOR){
+        int16_t  numerator16 = (int16_t)(ev_numerator & 0x0000ffff);
+        uint16_t denominator16 = EXPOSURE_COMPENSATION_DENOMINATOR;
+        value = numerator16 << 16 | denominator16;
+    } else {
+       printf("Reached min EV.\n");
+    }
+    return mm_app_set_config_parm(cam_id, MM_CAMERA_PARM_EXPOSURE_COMPENSATION, value);
+#endif
+  return 0;
+}
+
+/*===========================================================================
+ * FUNCTION     - increase_saturation -
+ *
+ * DESCRIPTION: Steps saturation up by CAMERA_SATURATION_STEP, clamps it to
+ *              CAMERA_MAX_SATURATION and applies it via MM_CAMERA_LIB_SATURATION.
+ * ===========================================================================*/
+int increase_saturation (mm_camera_lib_handle *lib_handle) {
+#if 0
+  saturation += CAMERA_SATURATION_STEP;
+  if (saturation > CAMERA_MAX_SATURATION) {
+    saturation = CAMERA_MAX_SATURATION;
+    printf("Reached max saturation. \n");
+  }
+  printf("Increase Saturation to %d\n", saturation);
+  return mm_app_set_config_parm(cam_id, MM_CAMERA_PARM_SATURATION, saturation);
+#endif
+  saturation += CAMERA_SATURATION_STEP;
+  if (saturation > CAMERA_MAX_SATURATION) {
+    saturation = CAMERA_MAX_SATURATION;
+    printf("Reached max saturation. \n");
+  }
+  printf("Increase saturation to %d\n", contrast);
+  return mm_camera_lib_send_command(lib_handle,
+                                       MM_CAMERA_LIB_SATURATION,
+                                       &saturation,
+                                       NULL);
+}
+
+/*===========================================================================
+ * FUNCTION     - decrease_saturation -
+ *
+ * DESCRIPTION: Steps saturation down by CAMERA_SATURATION_STEP, clamps it to
+ *              CAMERA_MIN_SATURATION and applies it via MM_CAMERA_LIB_SATURATION.
+ * ===========================================================================*/
+int decrease_saturation (mm_camera_lib_handle *lib_handle) {
+#if 0
+  saturation -= CAMERA_SATURATION_STEP;
+  if (saturation < CAMERA_MIN_SATURATION) {
+    saturation = CAMERA_MIN_SATURATION;
+    printf("Reached min saturation. \n");
+  }
+  printf("Dcrease Saturation to %d\n", saturation);
+  return mm_app_set_config_parm(cam_id, MM_CAMERA_PARM_SATURATION, saturation);
+#endif
+  saturation -= CAMERA_SATURATION_STEP;
+  if (saturation < CAMERA_MIN_SATURATION) {
+    saturation = CAMERA_MIN_SATURATION;
+    printf("Reached min saturation. \n");
+  }
+  printf("decrease saturation to %d\n", contrast);
+  return mm_camera_lib_send_command(lib_handle,
+                                       MM_CAMERA_LIB_SATURATION,
+                                       &saturation,
+                                       NULL);
+}
+
+
+int take_jpeg_snapshot(mm_camera_test_obj_t *test_obj, int is_burst_mode)
+{
+  CDBG_HIGH("\nEnter take_jpeg_snapshot!!\n");
+  int rc = mm_app_take_picture (test_obj, (uint8_t)is_burst_mode);
+  if (MM_CAMERA_OK != rc) {
+    CDBG_ERROR("%s: mm_app_take_picture() err=%d\n", __func__, rc);
+  }
+  return rc;
+}
+
+/*===========================================================================
+ * FUNCTION    - main -
+ *
+ * DESCRIPTION: Entry point; selects menu-based or regression execution and
+ *              runs the interactive menu through submain().
+ *==========================================================================*/
+int main()
+{
+    char tc_buf[3];
+    int mode = 0;
+    int rc = 0;
+
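+    /* Read the execution mode from stdin: '0' runs the interactive menu,
+     * '1' runs the regression suite. */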
+    printf("Please Select Execution Mode:\n");
+    printf("0: Menu Based 1: Regression\n");
+    fgets(tc_buf, 3, stdin);
+    mode = tc_buf[0] - '0';
+    if(mode == 0) {
+      printf("\nStarting Menu based!!\n");
+    } else if(mode == 1) {
+      printf("Starting Regression testing!!\n");
+      if(!mm_app_start_regression_test(1)) {
+         printf("\nRegressiion test passed!!\n");
+         return 0;
+      } else {
+        printf("\nRegression test failed!!\n");
+        exit(-1);
+      }
+    } else {
+       printf("\nPlease Enter 0 or 1\n");
+       printf("\nExisting the App!!\n");
+       exit(-1);
+    }
+
+
+    rc = submain();
+
+    printf("Exiting application\n");
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION     - set_whitebalance -
+ *
+ * DESCRIPTION: Maps the white balance menu selection to a CAM_WB_MODE_* value
+ *              and applies it via MM_CAMERA_LIB_WB.
+ * ===========================================================================*/
+int set_whitebalance (mm_camera_lib_handle *lib_handle, int wb_action_param) {
+        cam_wb_mode_type type = 0;
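+        /* Translate the menu selection into the matching CAM_WB_MODE_* value. */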
+        switch (wb_action_param) {
+                case WB_AUTO:
+                        printf("\n WB_AUTO\n");
+                        type = CAM_WB_MODE_AUTO;
+                        break;
+                case WB_INCANDESCENT:
+                        printf("\n WB_INCANDESCENT\n");
+                        type = CAM_WB_MODE_INCANDESCENT;
+                        break;
+                case WB_FLUORESCENT:
+                        printf("\n WB_FLUORESCENT\n");
+                        type = CAM_WB_MODE_FLUORESCENT;
+                        break;
+                case WB_WARM_FLUORESCENT:
+                        printf("\n WB_WARM_FLUORESCENT\n");
+                        type = CAM_WB_MODE_WARM_FLUORESCENT;
+                        break;
+                case WB_DAYLIGHT:
+                        printf("\n WB_DAYLIGHT\n");
+                        type = CAM_WB_MODE_DAYLIGHT;
+                        break;
+                case WB_CLOUDY_DAYLIGHT:
+                        printf("\n WB_CLOUDY_DAYLIGHT\n");
+                        type = CAM_WB_MODE_CLOUDY_DAYLIGHT;
+                        break;
+               case WB_TWILIGHT:
+                        printf("\n WB_TWILIGHT\n");
+                        type = CAM_WB_MODE_TWILIGHT;
+                        break;
+               case WB_SHADE:
+                        printf("\n WB_SHADE\n");
+                        type = CAM_WB_MODE_SHADE;
+                        break;
+                default:
+                        break;
+        }
+        return mm_camera_lib_send_command(lib_handle,
+                                          MM_CAMERA_LIB_WB,
+                                          &type,
+                                          NULL);
+}
+
+
+/*===========================================================================
+ * FUNCTION     - set_exp_metering -
+ *
+ * DESCRIPTION: Maps the exposure metering menu selection to a CAM_AEC_MODE_*
+ *              value and applies it via MM_CAMERA_LIB_EXPOSURE_METERING.
+ * ===========================================================================*/
+int set_exp_metering (mm_camera_lib_handle *lib_handle, int exp_metering_action_param) {
+        cam_auto_exposure_mode_type type = 0;
+        switch (exp_metering_action_param) {
+                case AUTO_EXP_FRAME_AVG:
+                        printf("\n AUTO_EXP_FRAME_AVG\n");
+                        type = CAM_AEC_MODE_FRAME_AVERAGE;
+                        break;
+                case AUTO_EXP_CENTER_WEIGHTED:
+                        printf("\n AUTO_EXP_CENTER_WEIGHTED\n");
+                        type = CAM_AEC_MODE_CENTER_WEIGHTED;
+                        break;
+                case AUTO_EXP_SPOT_METERING:
+                        printf("\n AUTO_EXP_SPOT_METERING\n");
+                        type = CAM_AEC_MODE_SPOT_METERING;
+                        break;
+                case AUTO_EXP_SMART_METERING:
+                        printf("\n AUTO_EXP_SMART_METERING\n");
+                        type = CAM_AEC_MODE_SMART_METERING;
+                        break;
+                case AUTO_EXP_USER_METERING:
+                        printf("\n AUTO_EXP_USER_METERING\n");
+                        type = CAM_AEC_MODE_USER_METERING;
+                        break;
+                case AUTO_EXP_SPOT_METERING_ADV:
+                        printf("\n AUTO_EXP_SPOT_METERING_ADV\n");
+                        type = CAM_AEC_MODE_SPOT_METERING_ADV;
+                        break;
+                case AUTO_EXP_CENTER_WEIGHTED_ADV:
+                        printf("\n AUTO_EXP_CENTER_WEIGHTED_ADV\n");
+                        type = CAM_AEC_MODE_CENTER_WEIGHTED_ADV;
+                        break;
+                default:
+                        break;
+        }
+        return mm_camera_lib_send_command(lib_handle,
+                                          MM_CAMERA_LIB_EXPOSURE_METERING,
+                                          &type,
+                                          NULL);
+}
+
+int get_ctrl_value (int ctrl_value_mode_param){
+#if 0
+    int rc = 0;
+    struct v4l2_control ctrl;
+
+    if (ctrl_value_mode_param == WHITE_BALANCE_STATE) {
+        printf("You chose WHITE_BALANCE_STATE\n");
+        ctrl.id = V4L2_CID_AUTO_WHITE_BALANCE;
+    }
+    else if (ctrl_value_mode_param == WHITE_BALANCE_TEMPERATURE) {
+        printf("You chose WHITE_BALANCE_TEMPERATURE\n");
+        ctrl.id = V4L2_CID_WHITE_BALANCE_TEMPERATURE;
+    }
+    else if (ctrl_value_mode_param == BRIGHTNESS_CTRL) {
+        printf("You chose brightness value\n");
+        ctrl.id = V4L2_CID_BRIGHTNESS;
+    }
+    else if (ctrl_value_mode_param == EV) {
+        printf("You chose exposure value\n");
+        ctrl.id = V4L2_CID_EXPOSURE;
+    }
+    else if (ctrl_value_mode_param == CONTRAST_CTRL) {
+        printf("You chose contrast value\n");
+        ctrl.id = V4L2_CID_CONTRAST;
+    }
+    else if (ctrl_value_mode_param == SATURATION_CTRL) {
+        printf("You chose saturation value\n");
+        ctrl.id = V4L2_CID_SATURATION;
+    } else if (ctrl_value_mode_param == SHARPNESS_CTRL) {
+        printf("You chose sharpness value\n");
+        ctrl.id = V4L2_CID_SHARPNESS;
+    }
+
+  //  rc = ioctl(camfd, VIDIOC_G_CTRL, &ctrl);
+    return rc;
+#endif
+  return ctrl_value_mode_param;
+}
+
+/*===========================================================================
+ * FUNCTION     - toggle_afr -
+ *
+ * DESCRIPTION: Toggles auto frame rate; the implementation is currently
+ *              compiled out (#if 0), so this is a no-op that returns 0.
+ * ===========================================================================*/
+int toggle_afr () {
+#if 0
+    if (fps_mode == FPS_MODE_AUTO) {
+        printf("\nSetting FPS_MODE_FIXED\n");
+        fps_mode = FPS_MODE_FIXED;
+    } else {
+        printf("\nSetting FPS_MODE_AUTO\n");
+        fps_mode = FPS_MODE_AUTO;
+    }
+    return mm_app_set_config_parm(cam_id, MM_CAMERA_PARM_FPS_MODE, fps_mode);
+#endif
+  return 0;
+}
+
+int set_zoom (mm_camera_lib_handle *lib_handle, int zoom_action_param) {
+
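+    /* Step zoom_level by ZOOM_STEP and clamp it to [ZOOM_MIN_VALUE, zoom_max_value]. */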
+    if (zoom_action_param == ZOOM_IN) {
+        zoom_level += ZOOM_STEP;
+        if (zoom_level > zoom_max_value)
+            zoom_level = zoom_max_value;
+    } else if (zoom_action_param == ZOOM_OUT) {
+        zoom_level -= ZOOM_STEP;
+        if (zoom_level < ZOOM_MIN_VALUE)
+            zoom_level = ZOOM_MIN_VALUE;
+    } else {
+        CDBG("%s: Invalid zoom_action_param value\n", __func__);
+        return -EINVAL;
+    }
+    return mm_camera_lib_send_command(lib_handle,
+                                      MM_CAMERA_LIB_ZOOM,
+                                      &zoom_level,
+                                      NULL);
+}
+
+/*===========================================================================
+ * FUNCTION     - set_iso -
+ *
+ * DESCRIPTION: Maps the ISO menu selection to a CAM_ISO_MODE_* value and
+ *              applies it via MM_CAMERA_LIB_ISO.
+ * ===========================================================================*/
+int set_iso (mm_camera_lib_handle *lib_handle, int iso_action_param) {
+    cam_iso_mode_type type = 0;
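+    /* Translate the menu selection into the matching CAM_ISO_MODE_* value. */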
+    switch (iso_action_param) {
+        case ISO_AUTO:
+            printf("\n ISO_AUTO\n");
+            type = CAM_ISO_MODE_AUTO;
+            break;
+        case ISO_DEBLUR:
+            printf("\n ISO_DEBLUR\n");
+            type = CAM_ISO_MODE_DEBLUR;
+            break;
+        case ISO_100:
+            printf("\n ISO_100\n");
+            type = CAM_ISO_MODE_100;
+            break;
+        case ISO_200:
+            printf("\n ISO_200\n");
+            type = CAM_ISO_MODE_200;
+            break;
+        case ISO_400:
+            printf("\n ISO_400\n");
+            type = CAM_ISO_MODE_400;
+            break;
+        case ISO_800:
+            printf("\n ISO_800\n");
+            type = CAM_ISO_MODE_800;
+            break;
+        case ISO_1600:
+            printf("\n ISO_1600\n");
+            type = CAM_ISO_MODE_1600;
+            break;
+        default:
+            break;
+    }
+    return mm_camera_lib_send_command(lib_handle,
+                                      MM_CAMERA_LIB_ISO,
+                                      &type,
+                                      NULL);
+}
+
+/*===========================================================================
+ * FUNCTION     - increase_sharpness -
+ *
+ * DESCRIPTION: Steps sharpness up by CAMERA_SHARPNESS_STEP, clamps it to
+ *              CAMERA_MAX_SHARPNESS and applies it via MM_CAMERA_LIB_SHARPNESS.
+ * ===========================================================================*/
+int increase_sharpness (mm_camera_lib_handle *lib_handle) {
+    sharpness += CAMERA_SHARPNESS_STEP;
+    if (sharpness > CAMERA_MAX_SHARPNESS) {
+        sharpness = CAMERA_MAX_SHARPNESS;
+        printf("Reached max SHARPNESS. \n");
+    }
+    printf("Increase Sharpness to %d\n", sharpness);
+    return mm_camera_lib_send_command(lib_handle,
+                                      MM_CAMERA_LIB_SHARPNESS,
+                                      &sharpness,
+                                      NULL);
+}
+
+/*===========================================================================
+ * FUNCTION     - decrease_sharpness -
+ *
+ * DESCRIPTION: Steps sharpness down by CAMERA_SHARPNESS_STEP, clamps it to
+ *              CAMERA_MIN_SHARPNESS and applies it via MM_CAMERA_LIB_SHARPNESS.
+ * ===========================================================================*/
+int decrease_sharpness (mm_camera_lib_handle *lib_handle) {
+    sharpness -= CAMERA_SHARPNESS_STEP;
+    if (sharpness < CAMERA_MIN_SHARPNESS) {
+        sharpness = CAMERA_MIN_SHARPNESS;
+        printf("Reached min SHARPNESS. \n");
+    }
+    printf("Decrease Sharpness to %d\n", sharpness);
+    return mm_camera_lib_send_command(lib_handle,
+                                      MM_CAMERA_LIB_SHARPNESS,
+                                      &sharpness,
+                                      NULL);
+}
+
+int set_flash_mode (mm_camera_lib_handle *lib_handle, int action_param) {
+    cam_flash_mode_t type = 0;
+    switch (action_param) {
+        case FLASH_MODE_OFF:
+            printf("\n FLASH_MODE_OFF\n");
+            type = CAM_FLASH_MODE_OFF;
+            break;
+        case FLASH_MODE_AUTO:
+            printf("\n FLASH_MODE_AUTO\n");
+            type = CAM_FLASH_MODE_AUTO;
+            break;
+        case FLASH_MODE_ON:
+            printf("\n FLASH_MODE_ON\n");
+            type = CAM_FLASH_MODE_ON;
+            break;
+        case FLASH_MODE_TORCH:
+            printf("\n FLASH_MODE_TORCH\n");
+            type = CAM_FLASH_MODE_TORCH;
+            break;
+        default:
+            break;
+    }
+    return mm_camera_lib_send_command(lib_handle,
+                                      MM_CAMERA_LIB_FLASH,
+                                      &type,
+                                      NULL);
+}
+
+int set_bestshot_mode(mm_camera_lib_handle *lib_handle, int action_param) {
+    cam_scene_mode_type type = 0;
+    switch (action_param) {
+       case BESTSHOT_AUTO:
+            printf("\n BEST SHOT AUTO\n");
+            type = CAM_SCENE_MODE_OFF;
+            break;
+        case BESTSHOT_ACTION:
+            printf("\n BEST SHOT ACTION\n");
+            type = CAM_SCENE_MODE_ACTION;
+            break;
+        case BESTSHOT_PORTRAIT:
+            printf("\n BEST SHOT PORTRAIT\n");
+            type = CAM_SCENE_MODE_PORTRAIT;
+            break;
+        case BESTSHOT_LANDSCAPE:
+            printf("\n BEST SHOT LANDSCAPE\n");
+            type = CAM_SCENE_MODE_LANDSCAPE;
+            break;
+        case BESTSHOT_NIGHT:
+            printf("\n BEST SHOT NIGHT\n");
+            type = CAM_SCENE_MODE_NIGHT;
+            break;
+        case BESTSHOT_NIGHT_PORTRAIT:
+            printf("\n BEST SHOT NIGHT PORTRAIT\n");
+            type = CAM_SCENE_MODE_NIGHT_PORTRAIT;
+            break;
+        case BESTSHOT_THEATRE:
+            printf("\n BEST SHOT THREATRE\n");
+            type = CAM_SCENE_MODE_THEATRE;
+            break;
+        case BESTSHOT_BEACH:
+            printf("\n BEST SHOT BEACH\n");
+            type = CAM_SCENE_MODE_BEACH;
+            break;
+        case BESTSHOT_SNOW:
+            printf("\n BEST SHOT SNOW\n");
+            type = CAM_SCENE_MODE_SNOW;
+            break;
+        case BESTSHOT_SUNSET:
+            printf("\n BEST SHOT SUNSET\n");
+            type = CAM_SCENE_MODE_SUNSET;
+            break;
+        case BESTSHOT_ANTISHAKE:
+            printf("\n BEST SHOT ANTISHAKE\n");
+            type = CAM_SCENE_MODE_ANTISHAKE;
+            break;
+        case BESTSHOT_FIREWORKS:
+            printf("\n BEST SHOT FIREWORKS\n");
+            type = CAM_SCENE_MODE_FIREWORKS;
+            break;
+        case BESTSHOT_SPORTS:
+            printf("\n BEST SHOT SPORTS\n");
+            type = CAM_SCENE_MODE_SPORTS;
+            break;
+        case BESTSHOT_PARTY:
+            printf("\n BEST SHOT PARTY\n");
+            type = CAM_SCENE_MODE_PARTY;
+            break;
+        case BESTSHOT_CANDLELIGHT:
+            printf("\n BEST SHOT CANDLELIGHT\n");
+            type = CAM_SCENE_MODE_CANDLELIGHT;
+            break;
+        case BESTSHOT_ASD:
+            printf("\n BEST SHOT ASD\n");
+            type = CAM_SCENE_MODE_AUTO;
+            break;
+        case BESTSHOT_BACKLIGHT:
+            printf("\n BEST SHOT BACKLIGHT\n");
+            type = CAM_SCENE_MODE_BACKLIGHT;
+            break;
+        case BESTSHOT_FLOWERS:
+            printf("\n BEST SHOT FLOWERS\n");
+            type = CAM_SCENE_MODE_FLOWERS;
+            break;
+        case BESTSHOT_AR:
+            printf("\n BEST SHOT AR\n");
+            type = CAM_SCENE_MODE_AR;
+            break;
+        case BESTSHOT_HDR:
+            printf("\n BEST SHOT HDR\n");
+            type = CAM_SCENE_MODE_OFF;
+            break;
+        default:
+            break;
+        }
+        return mm_camera_lib_send_command(lib_handle,
+                                          MM_CAMERA_LIB_BESTSHOT,
+                                          &type,
+                                          NULL);
+}
+/*===========================================================================
+ * FUNCTION     - print_current_menu -
+ *
+ * DESCRIPTION: Prints the menu table that corresponds to the currently
+ *              active menu id.
+ * ===========================================================================*/
+int print_current_menu (menu_id_change_t current_menu_id) {
+  if (current_menu_id == MENU_ID_MAIN) {
+    print_menu_preview_video ();
+  } else if (current_menu_id == MENU_ID_WHITEBALANCECHANGE) {
+    camera_preview_video_wb_change_tbl();
+  } else if (current_menu_id == MENU_ID_EXPMETERINGCHANGE) {
+    camera_preview_video_exp_metering_change_tbl();
+  } else if (current_menu_id == MENU_ID_GET_CTRL_VALUE) {
+    camera_preview_video_get_ctrl_value_tbl();
+  } else if (current_menu_id == MENU_ID_ISOCHANGE) {
+    camera_preview_video_iso_change_tbl();
+  } else if (current_menu_id == MENU_ID_BRIGHTNESSCHANGE) {
+    camera_brightness_change_tbl ();
+  } else if (current_menu_id == MENU_ID_CONTRASTCHANGE) {
+    camera_contrast_change_tbl ();
+  } else if (current_menu_id == MENU_ID_EVCHANGE) {
+    camera_EV_change_tbl ();
+  } else if (current_menu_id == MENU_ID_SATURATIONCHANGE) {
+    camera_saturation_change_tbl ();
+  } else if (current_menu_id == MENU_ID_ZOOMCHANGE) {
+    camera_preview_video_zoom_change_tbl();
+  } else if (current_menu_id == MENU_ID_SHARPNESSCHANGE) {
+    camera_preview_video_sharpness_change_tbl();
+  } else if (current_menu_id == MENU_ID_BESTSHOT) {
+    camera_set_bestshot_tbl();
+  } else if (current_menu_id == MENU_ID_FLASHMODE) {
+    camera_set_flashmode_tbl();
+  } else if (current_menu_id == MENU_ID_SENSORS ) {
+    camera_sensors_tbl();
+  } else if (current_menu_id == MENU_ID_SWITCH_RES ) {
+    camera_resolution_change_tbl();
+  }
+
+  return 0;
+}
+
+int filter_resolutions(mm_camera_lib_handle *lib_handle,
+                       DIMENSION_TBL_T *tbl,
+                       size_t tbl_size)
+{
+    size_t i, j;
+    cam_capability_t camera_cap;
+    int rc = 0;
+
+    if ( ( NULL == lib_handle ) || ( NULL == tbl ) ) {
+        return -1;
+    }
+
+    rc = mm_camera_lib_get_caps(lib_handle, &camera_cap);
+    if ( MM_CAMERA_OK != rc ) {
+        CDBG_ERROR("%s:mm_camera_lib_get_caps() err=%d\n", __func__, rc);
+        return -1;
+    }
+
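+    /* Mark every table entry whose dimensions match a sensor-reported picture
+     * size; rc ends up holding the index of the last supported entry. */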
+    for( i = 0 ; i < tbl_size ; i++ ) {
+        for( j = 0; j < camera_cap.picture_sizes_tbl_cnt; j++ ) {
+            if ( ( tbl[i].width == camera_cap.picture_sizes_tbl[j].width ) &&
+                 ( tbl[i].height == camera_cap.picture_sizes_tbl[j].height ) ) {
+                tbl[i].supported = 1;
+                rc = (int)i;
+                break;
+            }
+        }
+    }
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : enableAFR
+ *
+ * DESCRIPTION: This function will go through the list
+ *              of supported FPS ranges and select the
+ *              one which has maximum range
+ *
+ * PARAMETERS :
+ *   @lib_handle   : camera test library handle
+ *
+ * RETURN     : uint32_t type of stream handle
+ *              MM_CAMERA_OK  -- Success
+ *              !=MM_CAMERA_OK -- Error status
+ *==========================================================================*/
+int enableAFR(mm_camera_lib_handle *lib_handle)
+{
+    size_t i, j;
+    float max_range = 0.0f;
+    cam_capability_t cap;
+    int rc = MM_CAMERA_OK;
+
+    if ( NULL == lib_handle ) {
+        return MM_CAMERA_E_INVALID_INPUT;
+    }
+
+    rc = mm_camera_lib_get_caps(lib_handle, &cap);
+    if ( MM_CAMERA_OK != rc ) {
+        CDBG_ERROR("%s:mm_camera_lib_get_caps() err=%d\n", __func__, rc);
+        return rc;
+    }
+
+    for( i = 0, j = 0 ; i < cap.fps_ranges_tbl_cnt ; i++ ) {
+        if ( max_range < (cap.fps_ranges_tbl[i].max_fps - cap.fps_ranges_tbl[i].min_fps) ) {
+            /* Remember the widest FPS range seen so far. */
+            max_range = cap.fps_ranges_tbl[i].max_fps - cap.fps_ranges_tbl[i].min_fps;
+            j = i;
+        }
+    }
+
+    rc = mm_camera_lib_send_command(lib_handle,
+                                    MM_CAMERA_LIB_FPS_RANGE,
+                                    &cap.fps_ranges_tbl[j],
+                                    NULL);
+
+    CDBG_ERROR("%s : FPS range [%5.2f:%5.2f] rc = %d",
+              __func__,
+              cap.fps_ranges_tbl[j].min_fps,
+              cap.fps_ranges_tbl[j].max_fps,
+              rc);
+
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION     - submain -
+ *
+ * DESCRIPTION: Interactive menu loop; opens the camera library, sets up the
+ *              sensor/resolution tables and services menu actions until ACTION_EXIT.
+ * ===========================================================================*/
+static int submain()
+{
+    int rc = 0;
+    char tc_buf[3];
+    menu_id_change_t current_menu_id = MENU_ID_MAIN, next_menu_id;
+    camera_action_t action_id;
+    int action_param;
+    uint8_t previewing = 0;
+    int isZSL = 0;
+    uint8_t wnr_enabled = 0;
+    mm_camera_lib_handle lib_handle;
+    int num_cameras;
+    int available_sensors =
+        (int)(sizeof(sensor_tbl) / sizeof(sensor_tbl[0]));
+    int available_snap_sizes =
+        (int)(sizeof(dimension_tbl)/sizeof(dimension_tbl[0]));
+    int i,c;
+    mm_camera_lib_snapshot_params snap_dim;
+    snap_dim.width = DEFAULT_SNAPSHOT_WIDTH;
+    snap_dim.height = DEFAULT_SNAPSHOT_HEIGHT;
+    cam_scene_mode_type default_scene = CAM_SCENE_MODE_OFF;
+    int set_tintless = 0;
+
+    mm_camera_test_obj_t test_obj;
+    memset(&test_obj, 0, sizeof(mm_camera_test_obj_t));
+
+    rc = mm_camera_lib_open(&lib_handle, 0);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s:mm_camera_lib_open() err=%d\n", __func__, rc);
+        return -1;
+    }
+
+    num_cameras = mm_camera_lib_number_of_cameras(&lib_handle);
+    if ( 0 >= num_cameras ) {
+        CDBG_ERROR("%s: No camera sensors reported!", __func__);
+        rc = -1;
+        goto ERROR;
+    } else if ( 1 <= num_cameras ) {
+        c = MIN(num_cameras, available_sensors);
+        for ( i = 0 ; i < c ; i++ ) {
+            sensor_tbl[i].present = 1;
+        }
+        current_menu_id = MENU_ID_SENSORS;
+    } else {
+        i = filter_resolutions(&lib_handle,
+                                dimension_tbl,
+                                (size_t)available_snap_sizes);
+        if ( ( i < 0 ) || ( i >= available_snap_sizes ) ) {
+            CDBG_ERROR("%s:filter_resolutions()\n", __func__);
+            goto ERROR;
+        }
+        snap_dim.width = dimension_tbl[i].width;
+        snap_dim.height = dimension_tbl[i].height;
+
+        rc = enableAFR(&lib_handle);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:enableAFR() err=%d\n", __func__, rc);
+            goto ERROR;
+        }
+
+        rc =  mm_camera_lib_send_command(&lib_handle,
+                                         MM_CAMERA_LIB_BESTSHOT,
+                                         &default_scene,
+                                         NULL);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_camera_lib_send_command() err=%d\n", __func__, rc);
+            goto ERROR;
+        }
+    }
+    /*start the eztune server*/
+    CDBG_HIGH("Starting eztune Server \n");
+    eztune_server_start(&lib_handle);
+
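+    /* Main interactive loop: print the active menu, read one keystroke and
+     * dispatch the mapped action. */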
+    do {
+        print_current_menu (current_menu_id);
+        fgets(tc_buf, 3, stdin);
+
+        next_menu_id = next_menu(current_menu_id, tc_buf[0], &action_id, &action_param);
+
+        if (next_menu_id != MENU_ID_INVALID) {
+          current_menu_id = next_menu_id;
+        }
+        if (action_id == ACTION_NO_ACTION) {
+          continue;
+        }
+
+        switch(action_id) {
+            case ACTION_START_PREVIEW:
+                CDBG_ERROR("ACTION_START_PREVIEW \n");
+                rc = mm_camera_lib_start_stream(&lib_handle);
+                if (rc != MM_CAMERA_OK) {
+                    CDBG_ERROR("%s:mm_camera_lib_start_stream() err=%d\n", __func__, rc);
+                    goto ERROR;
+                }
+                previewing = 1;
+                break;
+
+            case ACTION_STOP_PREVIEW:
+                CDBG("ACTION_STOP_PREVIEW \n");
+                rc = mm_camera_lib_stop_stream(&lib_handle);
+                if (rc != MM_CAMERA_OK) {
+                    CDBG_ERROR("%s:mm_camera_lib_stop_stream() err=%d\n", __func__, rc);
+                    goto ERROR;
+                }
+                previewing = 0;
+                break;
+
+            case ACTION_SET_WHITE_BALANCE:
+                CDBG("Selection for the White Balance changes\n");
+                set_whitebalance(&lib_handle, action_param);
+                break;
+
+            case ACTION_SET_TINTLESS_ENABLE:
+                CDBG("Selection for the Tintless enable changes\n");
+                set_tintless = 1;
+                rc =  mm_camera_lib_send_command(&lib_handle,
+                                                 MM_CAMERA_LIB_SET_TINTLESS,
+                                                 &set_tintless,
+                                                 NULL);
+                if (rc != MM_CAMERA_OK) {
+                    CDBG_ERROR("%s:mm_camera_lib_send_command() err=%d\n", __func__, rc);
+                    goto ERROR;
+                }
+                break;
+
+            case ACTION_SET_TINTLESS_DISABLE:
+                CDBG("Selection for the Tintless disable changes\n");
+                set_tintless = 0;
+                rc =  mm_camera_lib_send_command(&lib_handle,
+                                                 MM_CAMERA_LIB_SET_TINTLESS,
+                                                 &set_tintless,
+                                                 NULL);
+                if (rc != MM_CAMERA_OK) {
+                    CDBG_ERROR("%s:mm_camera_lib_send_command() err=%d\n", __func__, rc);
+                    goto ERROR;
+                }
+                break;
+
+            case ACTION_SET_EXP_METERING:
+                CDBG("Selection for the Exposure Metering changes\n");
+                set_exp_metering(&lib_handle, action_param);
+                break;
+
+            case ACTION_GET_CTRL_VALUE:
+                CDBG("Selection for getting control value\n");
+                get_ctrl_value(action_param);
+                break;
+
+            case ACTION_BRIGHTNESS_INCREASE:
+                printf("Increase brightness\n");
+                increase_brightness(&lib_handle);
+                break;
+
+            case ACTION_BRIGHTNESS_DECREASE:
+                printf("Decrease brightness\n");
+                decrease_brightness(&lib_handle);
+                break;
+
+            case ACTION_CONTRAST_INCREASE:
+                CDBG("Selection for the contrast increase\n");
+                increase_contrast (&lib_handle);
+                break;
+
+            case ACTION_CONTRAST_DECREASE:
+                CDBG("Selection for the contrast decrease\n");
+                decrease_contrast (&lib_handle);
+                break;
+
+            case ACTION_EV_INCREASE:
+                CDBG("Selection for the EV increase\n");
+                increase_EV ();
+                break;
+
+            case ACTION_EV_DECREASE:
+                CDBG("Selection for the EV decrease\n");
+                decrease_EV ();
+                break;
+
+            case ACTION_SATURATION_INCREASE:
+                CDBG("Selection for the EV increase\n");
+                increase_saturation (&lib_handle);
+                break;
+
+            case ACTION_SATURATION_DECREASE:
+                CDBG("Selection for the EV decrease\n");
+                decrease_saturation (&lib_handle);
+                break;
+
+            case ACTION_TOGGLE_AFR:
+                CDBG("Select for auto frame rate toggling\n");
+                toggle_afr();
+                break;
+
+            case ACTION_SET_ISO:
+                CDBG("Select for ISO changes\n");
+                set_iso(&lib_handle, action_param);
+                break;
+
+            case ACTION_SET_ZOOM:
+                CDBG("Selection for the zoom direction changes\n");
+                set_zoom(&lib_handle, action_param);
+                break;
+
+            case ACTION_SHARPNESS_INCREASE:
+                CDBG("Selection for sharpness increase\n");
+                increase_sharpness(&lib_handle);
+                break;
+
+            case ACTION_SHARPNESS_DECREASE:
+                CDBG("Selection for sharpness decrease\n");
+                decrease_sharpness(&lib_handle);
+                break;
+
+            case ACTION_SET_BESTSHOT_MODE:
+                CDBG("Selection for bestshot\n");
+                set_bestshot_mode(&lib_handle, action_param);
+                break;
+
+            case ACTION_SET_FLASH_MODE:
+                printf("\n Selection for flashmode\n");
+                set_flash_mode(&lib_handle, action_param);
+                break;
+
+            case ACTION_SWITCH_CAMERA:
+                rc = mm_camera_lib_close(&lib_handle);
+                if (rc != MM_CAMERA_OK) {
+                    CDBG_ERROR("%s:mm_camera_lib_close() err=%d\n", __func__, rc);
+                    goto ERROR;
+                }
+
+                rc = mm_camera_lib_open(&lib_handle, action_param);
+                if (rc != MM_CAMERA_OK) {
+                    CDBG_ERROR("%s:mm_camera_lib_open() err=%d\n", __func__, rc);
+                    goto ERROR;
+                }
+
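+                /* Re-query the supported snapshot resolutions for the newly opened camera. */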
+                i = filter_resolutions(&lib_handle,
+                                        dimension_tbl,
+                                        sizeof(dimension_tbl)/sizeof(dimension_tbl[0]));
+                if ( ( i < 0 ) || ( i >=  available_snap_sizes ) ) {
+                    CDBG_ERROR("%s:filter_resolutions()\n", __func__);
+                    goto ERROR;
+                }
+                snap_dim.width = dimension_tbl[i].width;
+                snap_dim.height = dimension_tbl[i].height;
+
+                rc = enableAFR(&lib_handle);
+                if (rc != MM_CAMERA_OK) {
+                    CDBG_ERROR("%s:enableAFR() err=%d\n", __func__, rc);
+                    goto ERROR;
+                }
+
+                rc =  mm_camera_lib_send_command(&lib_handle,
+                                                 MM_CAMERA_LIB_BESTSHOT,
+                                                 &default_scene,
+                                                 NULL);
+                if (rc != MM_CAMERA_OK) {
+                    CDBG_ERROR("%s:mm_camera_lib_send_command() err=%d\n", __func__, rc);
+                    goto ERROR;
+                }
+                break;
+
+            case ACTION_TOGGLE_ZSL:
+                printf("ZSL Toggle !!!\n");
+                isZSL = !isZSL;
+                if ( isZSL ) {
+                    printf("ZSL on !!!\n");
+                } else {
+                    printf("ZSL off !!!\n");
+                }
+                rc = mm_camera_lib_send_command(&lib_handle,
+                                                MM_CAMERA_LIB_ZSL_ENABLE,
+                                                &isZSL,
+                                                NULL);
+                if (rc != MM_CAMERA_OK) {
+                    CDBG_ERROR("%s:mm_camera_lib_send_command() err=%d\n", __func__, rc);
+                    goto ERROR;
+                }
+                break;
+
+            case ACTION_TAKE_RAW_SNAPSHOT:
+                CDBG_HIGH("\n Take RAW snapshot\n");
+
+                rc = mm_camera_lib_send_command(&lib_handle,
+                                                MM_CAMERA_LIB_DO_AF,
+                                                NULL,
+                                                NULL);
+
+                if (rc != MM_CAMERA_OK) {
+                    CDBG_ERROR("%s:mm_camera_lib_send_command() err=%d\n", __func__, rc);
+                    goto ERROR;
+                }
+
+                rc = mm_camera_lib_send_command(&lib_handle,
+                                                MM_CAMERA_LIB_RAW_CAPTURE,
+                                                NULL,
+                                                NULL);
+                if (rc != MM_CAMERA_OK) {
+                    CDBG_ERROR("%s:mm_camera_lib_send_command() err=%d\n", __func__, rc);
+                    goto ERROR;
+                }
+                break;
+
+            case ACTION_TAKE_JPEG_SNAPSHOT:
+                CDBG_HIGH("\n Take JPEG snapshot\n");
+
+                rc = mm_camera_lib_send_command(&lib_handle,
+                                                MM_CAMERA_LIB_DO_AF,
+                                                NULL,
+                                                NULL);
+
+                if (rc != MM_CAMERA_OK) {
+                    CDBG_ERROR("%s:mm_camera_lib_send_command() err=%d\n", __func__, rc);
+                    goto ERROR;
+                }
+
+                rc = mm_camera_lib_send_command(&lib_handle,
+                                                MM_CAMERA_LIB_JPEG_CAPTURE,
+                                                &snap_dim,
+                                                NULL);
+                if (rc != MM_CAMERA_OK) {
+                    CDBG_ERROR("%s:mm_camera_lib_send_command() err=%d\n", __func__, rc);
+                    goto ERROR;
+                }
+                break;
+            case ACTION_SWITCH_RESOLUTION:
+                printf("\n Switch snapshot resolution to %dx%d\n",
+                       dimension_tbl[action_param].width,
+                       dimension_tbl[action_param].height);
+                snap_dim.width = dimension_tbl[action_param].width;
+                snap_dim.height = dimension_tbl[action_param].height;
+                break;
+
+      case ACTION_START_RECORDING:
+        CDBG("Start recording action\n");
+#if 0
+        if (mm_app_start_video(cam_id) < 0)
+          goto ERROR;
+        is_rec = 1;
+#endif
+        break;
+      case ACTION_STOP_RECORDING:
+        CDBG("Stop recording action\n");
+#if 0
+        if(is_rec) {
+          if (mm_app_stop_video(cam_id) < 0)
+            goto ERROR;
+          is_rec = 0;
+        }
+#endif
+        break;
+      case ACTION_TAKE_LIVE_SNAPSHOT:
+        printf("Selection for live shot\n");
+#if 0
+        if(is_rec)
+           mm_app_take_live_snapshot(cam_id);
+        else
+           printf("\n !!! Use live snapshot option while recording only !!!\n");
+#endif
+        break;
+
+        case ACTION_TOGGLE_WNR:
+          wnr_enabled = !wnr_enabled;
+          printf("WNR Enabled = %d\n", wnr_enabled);
+          rc = mm_camera_lib_send_command(&lib_handle,
+                                          MM_CAMERA_LIB_WNR_ENABLE,
+                                          &wnr_enabled,
+                                          NULL);
+          if (rc != MM_CAMERA_OK) {
+              CDBG_ERROR("%s:mm_camera_lib_send_command() err=%d\n", __func__, rc);
+              goto ERROR;
+          }
+          break;
+
+        case ACTION_EXIT:
+            printf("Exiting....\n");
+            break;
+        case ACTION_NO_ACTION:
+            printf("Go back to main menu");
+            break;
+
+        default:
+            printf("\n\n!!!!!WRONG INPUT: %d!!!!\n", action_id);
+            break;
+    }
+
+    usleep(1000 * 1000);
+    CDBG("action_id = %d\n", action_id);
+
+  } while (action_id != ACTION_EXIT);
+  action_id = ACTION_NO_ACTION;
+
+    mm_camera_lib_close(&lib_handle);
+    return 0;
+
+ERROR:
+
+    mm_camera_lib_close(&lib_handle);
+
+    return rc;
+}
+
diff --git a/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_preview.c b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_preview.c
new file mode 100644
index 0000000..4f6cf86
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_preview.c
@@ -0,0 +1,991 @@
+/*
+Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+    * Neither the name of The Linux Foundation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "mm_qcamera_dbg.h"
+#include "mm_qcamera_app.h"
+#include <assert.h>
+#include <sys/mman.h>
+#include <semaphore.h>
+
+static void mm_app_metadata_notify_cb(mm_camera_super_buf_t *bufs,
+                                     void *user_data)
+{
+  uint32_t i = 0;
+  mm_camera_channel_t *channel = NULL;
+  mm_camera_stream_t *p_stream = NULL;
+  mm_camera_test_obj_t *pme = (mm_camera_test_obj_t *)user_data;
+  mm_camera_buf_def_t *frame;
+  metadata_buffer_t *pMetadata;
+
+  if (NULL == bufs || NULL == user_data) {
+      CDBG_ERROR("%s: bufs or user_data are not valid ", __func__);
+      return;
+  }
+  frame = bufs->bufs[0];
+
+  /* find channel */
+  for (i = 0; i < MM_CHANNEL_TYPE_MAX; i++) {
+      if (pme->channels[i].ch_id == bufs->ch_id) {
+          channel = &pme->channels[i];
+          break;
+      }
+  }
+
+  if (NULL == channel) {
+      CDBG_ERROR("%s: Channel object is NULL ", __func__);
+      return;
+  }
+
+  /* find metadata stream */
+  for (i = 0; i < channel->num_streams; i++) {
+      if (channel->streams[i].s_config.stream_info->stream_type == CAM_STREAM_TYPE_METADATA) {
+          p_stream = &channel->streams[i];
+          break;
+      }
+  }
+
+  if (NULL == p_stream) {
+      CDBG_ERROR("%s: cannot find metadata stream", __func__);
+      return;
+  }
+
+  /* find metadata frame */
+  for (i = 0; i < bufs->num_bufs; i++) {
+      if (bufs->bufs[i]->stream_id == p_stream->s_id) {
+          frame = bufs->bufs[i];
+          break;
+      }
+  }
+
+  if (pme->metadata == NULL) {
+    /* The app frees this metadata buffer; no need to free it here. */
+    pme->metadata = malloc(sizeof(metadata_buffer_t));
+    if (NULL == pme->metadata) {
+        CDBG_ERROR("%s: Canot allocate metadata memory\n", __func__);
+        return;
+    }
+  }
+  memcpy(pme->metadata, frame->buffer, sizeof(metadata_buffer_t));
+
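+  /* Inspect the autofocus state in this metadata buffer and unblock the app
+   * once AF reaches a terminal state. */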
+  pMetadata = (metadata_buffer_t *)frame->buffer;
+  IF_META_AVAILABLE(cam_auto_focus_data_t, focus_data, CAM_INTF_META_AUTOFOCUS_DATA,
+        pMetadata) {
+    if (focus_data->focus_state == CAM_AF_FOCUSED) {
+      CDBG_ERROR("%s: AutoFocus Done Call Back Received\n",__func__);
+      mm_camera_app_done();
+    } else if (focus_data->focus_state == CAM_AF_NOT_FOCUSED) {
+      CDBG_ERROR("%s: AutoFocus failed\n",__func__);
+      mm_camera_app_done();
+    }
+  }
+
+  if (MM_CAMERA_OK != pme->cam->ops->qbuf(bufs->camera_handle,
+                                          bufs->ch_id,
+                                          frame)) {
+      CDBG_ERROR("%s: Failed in Preview Qbuf\n", __func__);
+  }
+  mm_app_cache_ops((mm_camera_app_meminfo_t *)frame->mem_info,
+                   ION_IOC_INV_CACHES);
+}
+
+static void mm_app_preview_notify_cb(mm_camera_super_buf_t *bufs,
+                                     void *user_data)
+{
+    uint32_t i = 0;
+    mm_camera_channel_t *channel = NULL;
+    mm_camera_stream_t *p_stream = NULL;
+    mm_camera_buf_def_t *frame = NULL;
+    mm_camera_test_obj_t *pme = (mm_camera_test_obj_t *)user_data;
+
+    if (NULL == bufs || NULL == user_data) {
+        CDBG_ERROR("%s: bufs or user_data are not valid ", __func__);
+        return;
+    }
+
+    frame = bufs->bufs[0];
+
+    /* find channel */
+    for (i = 0; i < MM_CHANNEL_TYPE_MAX; i++) {
+        if (pme->channels[i].ch_id == bufs->ch_id) {
+            channel = &pme->channels[i];
+            break;
+        }
+    }
+    if (NULL == channel) {
+        CDBG_ERROR("%s: Channel object is NULL ", __func__);
+        return;
+    }
+    /* find preview stream */
+    for (i = 0; i < channel->num_streams; i++) {
+        if (channel->streams[i].s_config.stream_info->stream_type == CAM_STREAM_TYPE_PREVIEW) {
+            p_stream = &channel->streams[i];
+            break;
+        }
+    }
+
+    if (NULL == p_stream) {
+        CDBG_ERROR("%s: cannot find preview stream", __func__);
+        return;
+    }
+
+    /* find preview frame */
+    for (i = 0; i < bufs->num_bufs; i++) {
+        if (bufs->bufs[i]->stream_id == p_stream->s_id) {
+            frame = bufs->bufs[i];
+            break;
+        }
+    }
+
+    if ( 0 < pme->fb_fd ) {
+        mm_app_overlay_display(pme, frame->fd);
+    }
+#ifdef DUMP_PRV_IN_FILE
+    {
+        char file_name[64];
+        snprintf(file_name, sizeof(file_name), "P_C%d", pme->cam->camera_handle);
+        mm_app_dump_frame(frame, file_name, "yuv", frame->frame_idx);
+    }
+#endif
+    if (pme->user_preview_cb) {
+        CDBG_ERROR("[DBG] %s, user defined own preview cb. calling it...", __func__);
+        pme->user_preview_cb(frame);
+    }
+    if (MM_CAMERA_OK != pme->cam->ops->qbuf(bufs->camera_handle,
+                bufs->ch_id,
+                frame)) {
+        CDBG_ERROR("%s: Failed in Preview Qbuf\n", __func__);
+    }
+    mm_app_cache_ops((mm_camera_app_meminfo_t *)frame->mem_info,
+            ION_IOC_INV_CACHES);
+
+    CDBG("%s: END\n", __func__);
+}
+
+static void mm_app_zsl_notify_cb(mm_camera_super_buf_t *bufs,
+                                 void *user_data)
+{
+    int rc = MM_CAMERA_OK;
+    uint32_t i = 0;
+    mm_camera_test_obj_t *pme = (mm_camera_test_obj_t *)user_data;
+    mm_camera_channel_t *channel = NULL;
+    mm_camera_stream_t *p_stream = NULL;
+    mm_camera_stream_t *m_stream = NULL;
+    mm_camera_stream_t *md_stream = NULL;
+    mm_camera_buf_def_t *p_frame = NULL;
+    mm_camera_buf_def_t *m_frame = NULL;
+    mm_camera_buf_def_t *md_frame = NULL;
+
+    CDBG("%s: BEGIN\n", __func__);
+
+    if (NULL == bufs || NULL == user_data) {
+        CDBG_ERROR("%s: bufs or user_data are not valid ", __func__);
+        return;
+    }
+
+    /* find channel */
+    for (i = 0; i < MM_CHANNEL_TYPE_MAX; i++) {
+        if (pme->channels[i].ch_id == bufs->ch_id) {
+            channel = &pme->channels[i];
+            break;
+        }
+    }
+    if (NULL == channel) {
+        CDBG_ERROR("%s: Wrong channel id (%d)", __func__, bufs->ch_id);
+        return;
+    }
+
+    /* find preview stream */
+    for (i = 0; i < channel->num_streams; i++) {
+        if (channel->streams[i].s_config.stream_info->stream_type == CAM_STREAM_TYPE_PREVIEW) {
+            p_stream = &channel->streams[i];
+            break;
+        }
+    }
+    if (NULL == p_stream) {
+        CDBG_ERROR("%s: cannot find preview stream", __func__);
+        return;
+    }
+
+    /* find snapshot stream */
+    for (i = 0; i < channel->num_streams; i++) {
+        if (channel->streams[i].s_config.stream_info->stream_type == CAM_STREAM_TYPE_SNAPSHOT) {
+            m_stream = &channel->streams[i];
+            break;
+        }
+    }
+    if (NULL == m_stream) {
+        CDBG_ERROR("%s: cannot find snapshot stream", __func__);
+        return;
+    }
+
+    /* find metadata stream */
+    for (i = 0; i < channel->num_streams; i++) {
+        if (channel->streams[i].s_config.stream_info->stream_type == CAM_STREAM_TYPE_METADATA) {
+            md_stream = &channel->streams[i];
+            break;
+        }
+    }
+    if (NULL == md_stream) {
+        CDBG_ERROR("%s: cannot find metadata stream", __func__);
+    }
+
+    /* find preview frame */
+    for (i = 0; i < bufs->num_bufs; i++) {
+        if (bufs->bufs[i]->stream_id == p_stream->s_id) {
+            p_frame = bufs->bufs[i];
+            break;
+        }
+    }
+
+    if(md_stream) {
+      /* find metadata frame */
+      for (i = 0; i < bufs->num_bufs; i++) {
+          if (bufs->bufs[i]->stream_id == md_stream->s_id) {
+              md_frame = bufs->bufs[i];
+              break;
+          }
+      }
+      if (!md_frame) {
+          ALOGE("%s: md_frame is null\n", __func__);
+          return;
+      }
+      if (!pme->metadata) {
+          /* App will free the metadata */
+          pme->metadata = malloc(sizeof(metadata_buffer_t));
+          if (!pme->metadata) {
+              ALOGE("%s: not enough memory\n", __func__);
+              return;
+          }
+      }
+
+      memcpy(pme->metadata, md_frame->buffer, sizeof(metadata_buffer_t));
+    }
+    /* find snapshot frame */
+    for (i = 0; i < bufs->num_bufs; i++) {
+        if (bufs->bufs[i]->stream_id == m_stream->s_id) {
+            m_frame = bufs->bufs[i];
+            break;
+        }
+    }
+
+    if (!m_frame || !p_frame) {
+        CDBG_ERROR("%s: cannot find preview/snapshot frame", __func__);
+        return;
+    }
+
+    CDBG("%s: ZSL CB with fb_fd = %d, m_frame = %p, p_frame = %p \n",
+         __func__,
+         pme->fb_fd,
+         m_frame,
+         p_frame);
+
+    if ( 0 < pme->fb_fd ) {
+        mm_app_overlay_display(pme, p_frame->fd);
+    }/* else {
+        mm_app_dump_frame(p_frame, "zsl_preview", "yuv", p_frame->frame_idx);
+        mm_app_dump_frame(m_frame, "zsl_main", "yuv", m_frame->frame_idx);
+    }*/
+
+    if ( pme->enable_reproc && ( NULL != pme->reproc_stream ) ) {
+
+        if (NULL != md_frame) {
+            rc = mm_app_do_reprocess(pme,
+                    m_frame,
+                    md_frame->buf_idx,
+                    bufs,
+                    md_stream);
+
+            if (MM_CAMERA_OK != rc ) {
+                CDBG_ERROR("%s: reprocess failed rc = %d", __func__, rc);
+            }
+        } else {
+            CDBG_ERROR("%s: md_frame is null\n", __func__);
+        }
+
+      return;
+    }
+
+    if ( pme->encodeJpeg ) {
+        pme->jpeg_buf.buf.buffer = (uint8_t *)malloc(m_frame->frame_len);
+        if ( NULL == pme->jpeg_buf.buf.buffer ) {
+            CDBG_ERROR("%s: error allocating jpeg output buffer", __func__);
+            goto exit;
+        }
+
+        pme->jpeg_buf.buf.frame_len = m_frame->frame_len;
+        /* create a new jpeg encoding session */
+        rc = createEncodingSession(pme, m_stream, m_frame);
+        if (0 != rc) {
+            CDBG_ERROR("%s: error creating jpeg session", __func__);
+            free(pme->jpeg_buf.buf.buffer);
+            goto exit;
+        }
+
+        /* start jpeg encoding job */
+        rc = encodeData(pme, bufs, m_stream);
+        pme->encodeJpeg = 0;
+    } else {
+        if (MM_CAMERA_OK != pme->cam->ops->qbuf(bufs->camera_handle,
+                                                bufs->ch_id,
+                                                m_frame)) {
+            CDBG_ERROR("%s: Failed in main Qbuf\n", __func__);
+        }
+        mm_app_cache_ops((mm_camera_app_meminfo_t *)m_frame->mem_info,
+                         ION_IOC_INV_CACHES);
+    }
+
+exit:
+
+    if (MM_CAMERA_OK != pme->cam->ops->qbuf(bufs->camera_handle,
+                                            bufs->ch_id,
+                                            p_frame)) {
+        CDBG_ERROR("%s: Failed in preview Qbuf\n", __func__);
+    }
+    mm_app_cache_ops((mm_camera_app_meminfo_t *)p_frame->mem_info,
+                     ION_IOC_INV_CACHES);
+
+    if(md_frame) {
+      if (MM_CAMERA_OK != pme->cam->ops->qbuf(bufs->camera_handle,
+                                              bufs->ch_id,
+                                              md_frame)) {
+          CDBG_ERROR("%s: Failed in metadata Qbuf\n", __func__);
+      }
+      mm_app_cache_ops((mm_camera_app_meminfo_t *)md_frame->mem_info,
+                       ION_IOC_INV_CACHES);
+    }
+
+    CDBG("%s: END\n", __func__);
+}
+
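+/*
+ * The mm_app_add_*_stream() helpers below follow a common pattern: allocate
+ * a stream on the given channel, hook up the test app's buffer allocation
+ * and cache-maintenance callbacks, fill in cam_stream_info_t and configure
+ * the stream. The metadata stream is sized as a single row of
+ * sizeof(metadata_buffer_t) bytes, so one buffer carries the whole metadata
+ * blob.
+ */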
+mm_camera_stream_t * mm_app_add_metadata_stream(mm_camera_test_obj_t *test_obj,
+                                               mm_camera_channel_t *channel,
+                                               mm_camera_buf_notify_t stream_cb,
+                                               void *userdata,
+                                               uint8_t num_bufs)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_stream_t *stream = NULL;
+    cam_capability_t *cam_cap = (cam_capability_t *)(test_obj->cap_buf.buf.buffer);
+
+    stream = mm_app_add_stream(test_obj, channel);
+    if (NULL == stream) {
+        CDBG_ERROR("%s: add stream failed\n", __func__);
+        return NULL;
+    }
+
+    stream->s_config.mem_vtbl.get_bufs = mm_app_stream_initbuf;
+    stream->s_config.mem_vtbl.put_bufs = mm_app_stream_deinitbuf;
+    stream->s_config.mem_vtbl.clean_invalidate_buf =
+      mm_app_stream_clean_invalidate_buf;
+    stream->s_config.mem_vtbl.invalidate_buf = mm_app_stream_invalidate_buf;
+    stream->s_config.mem_vtbl.user_data = (void *)stream;
+    stream->s_config.stream_cb = stream_cb;
+    stream->s_config.userdata = userdata;
+    stream->num_of_bufs = num_bufs;
+
+    stream->s_config.stream_info = (cam_stream_info_t *)stream->s_info_buf.buf.buffer;
+    memset(stream->s_config.stream_info, 0, sizeof(cam_stream_info_t));
+    stream->s_config.stream_info->stream_type = CAM_STREAM_TYPE_METADATA;
+    stream->s_config.stream_info->streaming_mode = CAM_STREAMING_MODE_CONTINUOUS;
+    stream->s_config.stream_info->fmt = DEFAULT_PREVIEW_FORMAT;
+    stream->s_config.stream_info->dim.width = sizeof(metadata_buffer_t);
+    stream->s_config.stream_info->dim.height = 1;
+    stream->s_config.padding_info = cam_cap->padding_info;
+
+    rc = mm_app_config_stream(test_obj, channel, stream, &stream->s_config);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:config preview stream err=%d\n", __func__, rc);
+        return NULL;
+    }
+
+    return stream;
+}
+
+mm_camera_stream_t * mm_app_add_preview_stream(mm_camera_test_obj_t *test_obj,
+                                               mm_camera_channel_t *channel,
+                                               mm_camera_buf_notify_t stream_cb,
+                                               void *userdata,
+                                               uint8_t num_bufs)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_stream_t *stream = NULL;
+    cam_capability_t *cam_cap = (cam_capability_t *)(test_obj->cap_buf.buf.buffer);
+
+    stream = mm_app_add_stream(test_obj, channel);
+    if (NULL == stream) {
+        CDBG_ERROR("%s: add stream failed\n", __func__);
+        return NULL;
+    }
+    stream->s_config.mem_vtbl.get_bufs = mm_app_stream_initbuf;
+    stream->s_config.mem_vtbl.put_bufs = mm_app_stream_deinitbuf;
+    stream->s_config.mem_vtbl.clean_invalidate_buf =
+      mm_app_stream_clean_invalidate_buf;
+    stream->s_config.mem_vtbl.invalidate_buf = mm_app_stream_invalidate_buf;
+    stream->s_config.mem_vtbl.user_data = (void *)stream;
+    stream->s_config.stream_cb = stream_cb;
+    stream->s_config.userdata = userdata;
+    stream->num_of_bufs = num_bufs;
+
+    stream->s_config.stream_info = (cam_stream_info_t *)stream->s_info_buf.buf.buffer;
+    memset(stream->s_config.stream_info, 0, sizeof(cam_stream_info_t));
+    stream->s_config.stream_info->stream_type = CAM_STREAM_TYPE_PREVIEW;
+    stream->s_config.stream_info->streaming_mode = CAM_STREAMING_MODE_CONTINUOUS;
+    stream->s_config.stream_info->fmt = DEFAULT_PREVIEW_FORMAT;
+
+    if ((test_obj->preview_resolution.user_input_display_width == 0) ||
+           ( test_obj->preview_resolution.user_input_display_height == 0)) {
+        stream->s_config.stream_info->dim.width = DEFAULT_PREVIEW_WIDTH;
+        stream->s_config.stream_info->dim.height = DEFAULT_PREVIEW_HEIGHT;
+    } else {
+        stream->s_config.stream_info->dim.width = test_obj->preview_resolution.user_input_display_width;
+        stream->s_config.stream_info->dim.height = test_obj->preview_resolution.user_input_display_height;
+    }
+
+    stream->s_config.padding_info = cam_cap->padding_info;
+
+    rc = mm_app_config_stream(test_obj, channel, stream, &stream->s_config);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:config preview stream err=%d\n", __func__, rc);
+        return NULL;
+    }
+
+    return stream;
+}
+
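+/*
+ * RAW stream: format and dimensions come from the test object
+ * (buffer_format, buffer_width/height, falling back to the default snapshot
+ * size). num_burst == 0 selects continuous streaming, otherwise a burst of
+ * num_burst frames is requested.
+ */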
+mm_camera_stream_t * mm_app_add_raw_stream(mm_camera_test_obj_t *test_obj,
+                                                mm_camera_channel_t *channel,
+                                                mm_camera_buf_notify_t stream_cb,
+                                                void *userdata,
+                                                uint8_t num_bufs,
+                                                uint8_t num_burst)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_stream_t *stream = NULL;
+    cam_capability_t *cam_cap = (cam_capability_t *)(test_obj->cap_buf.buf.buffer);
+
+    stream = mm_app_add_stream(test_obj, channel);
+    if (NULL == stream) {
+        CDBG_ERROR("%s: add stream failed\n", __func__);
+        return NULL;
+    }
+
+    stream->s_config.mem_vtbl.get_bufs = mm_app_stream_initbuf;
+    stream->s_config.mem_vtbl.put_bufs = mm_app_stream_deinitbuf;
+    stream->s_config.mem_vtbl.invalidate_buf = mm_app_stream_invalidate_buf;
+    stream->s_config.mem_vtbl.user_data = (void *)stream;
+    stream->s_config.stream_cb = stream_cb;
+    stream->s_config.userdata = userdata;
+    stream->num_of_bufs = num_bufs;
+
+    stream->s_config.stream_info = (cam_stream_info_t *)stream->s_info_buf.buf.buffer;
+    memset(stream->s_config.stream_info, 0, sizeof(cam_stream_info_t));
+    stream->s_config.stream_info->stream_type = CAM_STREAM_TYPE_RAW;
+    if (num_burst == 0) {
+        stream->s_config.stream_info->streaming_mode = CAM_STREAMING_MODE_CONTINUOUS;
+    } else {
+        stream->s_config.stream_info->streaming_mode = CAM_STREAMING_MODE_BURST;
+        stream->s_config.stream_info->num_of_burst = num_burst;
+    }
+    stream->s_config.stream_info->fmt = test_obj->buffer_format;
+    if ( test_obj->buffer_width == 0 || test_obj->buffer_height == 0 ) {
+        stream->s_config.stream_info->dim.width = DEFAULT_SNAPSHOT_WIDTH;
+        stream->s_config.stream_info->dim.height = DEFAULT_SNAPSHOT_HEIGHT;
+    } else {
+        stream->s_config.stream_info->dim.width = (int32_t)test_obj->buffer_width;
+        stream->s_config.stream_info->dim.height = (int32_t)test_obj->buffer_height;
+    }
+    stream->s_config.padding_info = cam_cap->padding_info;
+
+    rc = mm_app_config_stream(test_obj, channel, stream, &stream->s_config);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:config preview stream err=%d\n", __func__, rc);
+        return NULL;
+    }
+
+    return stream;
+}
+
+mm_camera_stream_t * mm_app_add_snapshot_stream(mm_camera_test_obj_t *test_obj,
+                                                mm_camera_channel_t *channel,
+                                                mm_camera_buf_notify_t stream_cb,
+                                                void *userdata,
+                                                uint8_t num_bufs,
+                                                uint8_t num_burst)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_stream_t *stream = NULL;
+    cam_capability_t *cam_cap = (cam_capability_t *)(test_obj->cap_buf.buf.buffer);
+
+    stream = mm_app_add_stream(test_obj, channel);
+    if (NULL == stream) {
+        CDBG_ERROR("%s: add stream failed\n", __func__);
+        return NULL;
+    }
+
+    stream->s_config.mem_vtbl.get_bufs = mm_app_stream_initbuf;
+    stream->s_config.mem_vtbl.put_bufs = mm_app_stream_deinitbuf;
+    stream->s_config.mem_vtbl.clean_invalidate_buf =
+      mm_app_stream_clean_invalidate_buf;
+    stream->s_config.mem_vtbl.invalidate_buf = mm_app_stream_invalidate_buf;
+    stream->s_config.mem_vtbl.user_data = (void *)stream;
+    stream->s_config.stream_cb = stream_cb;
+    stream->s_config.userdata = userdata;
+    stream->num_of_bufs = num_bufs;
+
+    stream->s_config.stream_info = (cam_stream_info_t *)stream->s_info_buf.buf.buffer;
+    memset(stream->s_config.stream_info, 0, sizeof(cam_stream_info_t));
+    stream->s_config.stream_info->stream_type = CAM_STREAM_TYPE_SNAPSHOT;
+    if (num_burst == 0) {
+        stream->s_config.stream_info->streaming_mode = CAM_STREAMING_MODE_CONTINUOUS;
+    } else {
+        stream->s_config.stream_info->streaming_mode = CAM_STREAMING_MODE_BURST;
+        stream->s_config.stream_info->num_of_burst = num_burst;
+    }
+    stream->s_config.stream_info->fmt = DEFAULT_SNAPSHOT_FORMAT;
+    if ( test_obj->buffer_width == 0 || test_obj->buffer_height == 0 ) {
+        stream->s_config.stream_info->dim.width = DEFAULT_SNAPSHOT_WIDTH;
+        stream->s_config.stream_info->dim.height = DEFAULT_SNAPSHOT_HEIGHT;
+    } else {
+        stream->s_config.stream_info->dim.width = (int32_t)test_obj->buffer_width;
+        stream->s_config.stream_info->dim.height = (int32_t)test_obj->buffer_height;
+    }
+    stream->s_config.padding_info = cam_cap->padding_info;
+
+    rc = mm_app_config_stream(test_obj, channel, stream, &stream->s_config);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:config preview stream err=%d\n", __func__, rc);
+        return NULL;
+    }
+
+    return stream;
+}
+
+mm_camera_channel_t * mm_app_add_preview_channel(mm_camera_test_obj_t *test_obj)
+{
+    mm_camera_channel_t *channel = NULL;
+    mm_camera_stream_t *stream = NULL;
+
+    channel = mm_app_add_channel(test_obj,
+                                 MM_CHANNEL_TYPE_PREVIEW,
+                                 NULL,
+                                 NULL,
+                                 NULL);
+    if (NULL == channel) {
+        CDBG_ERROR("%s: add channel failed", __func__);
+        return NULL;
+    }
+
+    stream = mm_app_add_preview_stream(test_obj,
+                                       channel,
+                                       mm_app_preview_notify_cb,
+                                       (void *)test_obj,
+                                       PREVIEW_BUF_NUM);
+    if (NULL == stream) {
+        CDBG_ERROR("%s: add stream failed\n", __func__);
+        mm_app_del_channel(test_obj, channel);
+        return NULL;
+    }
+
+    return channel;
+}
+
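+/*
+ * Generic teardown: stop the channel first, then delete each of its streams
+ * (bounded by MAX_STREAM_NUM_IN_BUNDLE) and finally the channel itself.
+ * Errors are logged but teardown continues so later resources still get
+ * released.
+ */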
+int mm_app_stop_and_del_channel(mm_camera_test_obj_t *test_obj,
+                                mm_camera_channel_t *channel)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_stream_t *stream = NULL;
+    uint8_t i;
+
+    rc = mm_app_stop_channel(test_obj, channel);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:Stop Preview failed rc=%d\n", __func__, rc);
+    }
+
+    if (channel->num_streams <= MAX_STREAM_NUM_IN_BUNDLE) {
+        for (i = 0; i < channel->num_streams; i++) {
+            stream = &channel->streams[i];
+            rc = mm_app_del_stream(test_obj, channel, stream);
+            if (MM_CAMERA_OK != rc) {
+                CDBG_ERROR("%s:del stream(%d) failed rc=%d\n", __func__, i, rc);
+            }
+        }
+    } else {
+        CDBG_ERROR("%s: num_streams = %d. Should not be more than %d\n",
+            __func__, channel->num_streams, MAX_STREAM_NUM_IN_BUNDLE);
+    }
+    rc = mm_app_del_channel(test_obj, channel);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:delete channel failed rc=%d\n", __func__, rc);
+    }
+
+    return rc;
+}
+
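+/*
+ * Plain preview start: create a preview channel, attach a metadata stream
+ * next to the preview stream, then start the channel. On failure the
+ * streams and channel created so far are deleted again.
+ */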
+int mm_app_start_preview(mm_camera_test_obj_t *test_obj)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_channel_t *channel = NULL;
+    mm_camera_stream_t *stream = NULL;
+    mm_camera_stream_t *s_metadata = NULL;
+    uint8_t i;
+
+    channel =  mm_app_add_preview_channel(test_obj);
+    if (NULL == channel) {
+        CDBG_ERROR("%s: add channel failed", __func__);
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    s_metadata = mm_app_add_metadata_stream(test_obj,
+                                            channel,
+                                            mm_app_metadata_notify_cb,
+                                            (void *)test_obj,
+                                            PREVIEW_BUF_NUM);
+    if (NULL == s_metadata) {
+        CDBG_ERROR("%s: add metadata stream failed\n", __func__);
+        mm_app_del_channel(test_obj, channel);
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    rc = mm_app_start_channel(test_obj, channel);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:start preview failed rc=%d\n", __func__, rc);
+        if (channel->num_streams <= MAX_STREAM_NUM_IN_BUNDLE) {
+            for (i = 0; i < channel->num_streams; i++) {
+                stream = &channel->streams[i];
+                mm_app_del_stream(test_obj, channel, stream);
+            }
+        }
+        mm_app_del_channel(test_obj, channel);
+        return rc;
+    }
+
+    return rc;
+}
+
+int mm_app_stop_preview(mm_camera_test_obj_t *test_obj)
+{
+    int rc = MM_CAMERA_OK;
+
+    mm_camera_channel_t *channel =
+        mm_app_get_channel_by_type(test_obj, MM_CHANNEL_TYPE_PREVIEW);
+
+    rc = mm_app_stop_and_del_channel(test_obj, channel);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:Stop Preview failed rc=%d\n", __func__, rc);
+    }
+
+    return rc;
+}
+
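+/*
+ * ZSL preview: one channel bundles preview, metadata and snapshot streams
+ * and delivers matched super buffers continuously via mm_app_zsl_notify_cb.
+ * The channel attributes (look_back, water_mark, max_unmatched_frames) tune
+ * the backend's frame matching and buffering behaviour for zero-shutter-lag
+ * capture. When reprocessing is enabled, a reprocess channel is created on
+ * top of the snapshot stream and started as well.
+ */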
+int mm_app_start_preview_zsl(mm_camera_test_obj_t *test_obj)
+{
+    int32_t rc = MM_CAMERA_OK;
+    mm_camera_channel_t *channel = NULL;
+    mm_camera_stream_t *s_preview = NULL;
+    mm_camera_stream_t *s_metadata = NULL;
+    mm_camera_stream_t *s_main = NULL;
+    mm_camera_channel_attr_t attr;
+
+    memset(&attr, 0, sizeof(mm_camera_channel_attr_t));
+    attr.notify_mode = MM_CAMERA_SUPER_BUF_NOTIFY_CONTINUOUS;
+    attr.look_back = 2;
+    attr.post_frame_skip = 0;
+    attr.water_mark = 2;
+    attr.max_unmatched_frames = 3;
+    channel = mm_app_add_channel(test_obj,
+                                 MM_CHANNEL_TYPE_ZSL,
+                                 &attr,
+                                 mm_app_zsl_notify_cb,
+                                 test_obj);
+    if (NULL == channel) {
+        CDBG_ERROR("%s: add channel failed", __func__);
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    s_preview = mm_app_add_preview_stream(test_obj,
+                                          channel,
+                                          mm_app_preview_notify_cb,
+                                          (void *)test_obj,
+                                          PREVIEW_BUF_NUM);
+    if (NULL == s_preview) {
+        CDBG_ERROR("%s: add preview stream failed\n", __func__);
+        mm_app_del_channel(test_obj, channel);
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    s_metadata = mm_app_add_metadata_stream(test_obj,
+                                            channel,
+                                            mm_app_metadata_notify_cb,
+                                            (void *)test_obj,
+                                            PREVIEW_BUF_NUM);
+    if (NULL == s_metadata) {
+        CDBG_ERROR("%s: add metadata stream failed\n", __func__);
+        mm_app_del_channel(test_obj, channel);
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    s_main = mm_app_add_snapshot_stream(test_obj,
+                                        channel,
+                                        NULL,
+                                        NULL,
+                                        PREVIEW_BUF_NUM,
+                                        0);
+    if (NULL == s_main) {
+        CDBG_ERROR("%s: add main snapshot stream failed\n", __func__);
+        mm_app_del_stream(test_obj, channel, s_preview);
+        mm_app_del_stream(test_obj, channel, s_metadata);
+        mm_app_del_channel(test_obj, channel);
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    rc = mm_app_start_channel(test_obj, channel);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:start zsl failed rc=%d\n", __func__, rc);
+        mm_app_del_stream(test_obj, channel, s_preview);
+        mm_app_del_stream(test_obj, channel, s_metadata);
+        mm_app_del_stream(test_obj, channel, s_main);
+        mm_app_del_channel(test_obj, channel);
+        return rc;
+    }
+
+    if ( test_obj->enable_reproc ) {
+        if ( NULL == mm_app_add_reprocess_channel(test_obj, s_main) ) {
+            CDBG_ERROR("%s: Reprocess channel failed to initialize \n", __func__);
+            mm_app_del_stream(test_obj, channel, s_preview);
+#ifdef USE_METADATA_STREAM
+            mm_app_del_stream(test_obj, channel, s_metadata);
+#endif
+            mm_app_del_stream(test_obj, channel, s_main);
+            mm_app_del_channel(test_obj, channel);
+            return -MM_CAMERA_E_GENERAL;
+        }
+        rc = mm_app_start_reprocess(test_obj);
+        if (MM_CAMERA_OK != rc) {
+            CDBG_ERROR("%s: reprocess start failed rc=%d\n", __func__, rc);
+            mm_app_del_stream(test_obj, channel, s_preview);
+#ifdef USE_METADATA_STREAM
+            mm_app_del_stream(test_obj, channel, s_metadata);
+#endif
+            mm_app_del_stream(test_obj, channel, s_main);
+            mm_app_del_channel(test_obj, channel);
+            return rc;
+        }
+    }
+
+    return rc;
+}
+
+int mm_app_stop_preview_zsl(mm_camera_test_obj_t *test_obj)
+{
+    int rc = MM_CAMERA_OK;
+
+    mm_camera_channel_t *channel =
+        mm_app_get_channel_by_type(test_obj, MM_CHANNEL_TYPE_ZSL);
+
+    rc = mm_app_stop_and_del_channel(test_obj, channel);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:Stop Preview failed rc=%d\n", __func__, rc);
+    }
+
+    if ( test_obj->enable_reproc ) {
+        rc |= mm_app_stop_reprocess(test_obj);
+    }
+
+    return rc;
+}
+
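+/*
+ * Framebuffer/overlay setup used to render preview frames on the panel:
+ * open the fb node, validate the reported resolutions, raise the backlight,
+ * describe an MDP overlay matching the camera buffer geometry, clear one
+ * slice of the framebuffer and register the overlay via MSMFB_OVERLAY_SET.
+ */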
+int mm_app_initialize_fb(mm_camera_test_obj_t *test_obj)
+{
+    int rc = MM_CAMERA_OK;
+    int brightness_fd;
+    const char brightness_level[] = BACKLIGHT_LEVEL;
+    void *fb_base = NULL;
+
+    assert( ( NULL != test_obj ) && ( 0 == test_obj->fb_fd ) );
+
+    test_obj->fb_fd = open(FB_PATH, O_RDWR);
+    if ( 0 > test_obj->fb_fd ) {
+        CDBG_ERROR("%s: FB device open failed rc=%d, %s\n",
+                   __func__,
+                   -errno,
+                   strerror(errno));
+        rc = -errno;
+        goto FAIL;
+    }
+
+    rc = ioctl(test_obj->fb_fd, FBIOGET_VSCREENINFO, &test_obj->vinfo);
+    if ( MM_CAMERA_OK != rc ) {
+        CDBG_ERROR("%s: Can not retrieve screen info rc=%d, %s\n",
+                   __func__,
+                   -errno,
+                   strerror(errno));
+        rc = -errno;
+        goto FAIL;
+    }
+
+    if ( ( 0 == test_obj->vinfo.yres_virtual ) ||
+         ( 0 == test_obj->vinfo.yres ) ||
+         ( test_obj->vinfo.yres > test_obj->vinfo.yres_virtual ) ) {
+        CDBG_ERROR("%s: Invalid FB virtual yres: %d, yres: %d\n",
+                   __func__,
+                   test_obj->vinfo.yres_virtual,
+                   test_obj->vinfo.yres);
+        rc = MM_CAMERA_E_GENERAL;
+        goto FAIL;
+    }
+
+    if ( ( 0 == test_obj->vinfo.xres_virtual ) ||
+         ( 0 == test_obj->vinfo.xres ) ||
+         ( test_obj->vinfo.xres > test_obj->vinfo.xres_virtual ) ) {
+        CDBG_ERROR("%s: Invalid FB virtual xres: %d, xres: %d\n",
+                   __func__,
+                   test_obj->vinfo.xres_virtual,
+                   test_obj->vinfo.xres);
+        rc = MM_CAMERA_E_GENERAL;
+        goto FAIL;
+    }
+
+    brightness_fd = open(BACKLIGHT_CONTROL, O_RDWR);
+    if ( brightness_fd >= 0 ) {
+        write(brightness_fd, brightness_level, strlen(brightness_level));
+        close(brightness_fd);
+    }
+
+    test_obj->slice_size = test_obj->vinfo.xres * ( test_obj->vinfo.yres - 1 ) * DEFAULT_OV_FORMAT_BPP;
+    memset(&test_obj->data_overlay, 0, sizeof(struct mdp_overlay));
+    test_obj->data_overlay.src.width  = test_obj->buffer_width;
+    test_obj->data_overlay.src.height = test_obj->buffer_height;
+    test_obj->data_overlay.src_rect.w = test_obj->buffer_width;
+    test_obj->data_overlay.src_rect.h = test_obj->buffer_height;
+    test_obj->data_overlay.dst_rect.w = test_obj->buffer_width;
+    test_obj->data_overlay.dst_rect.h = test_obj->buffer_height;
+    test_obj->data_overlay.src.format = DEFAULT_OV_FORMAT;
+    test_obj->data_overlay.src_rect.x = 0;
+    test_obj->data_overlay.src_rect.y = 0;
+    test_obj->data_overlay.dst_rect.x = 0;
+    test_obj->data_overlay.dst_rect.y = 0;
+    test_obj->data_overlay.z_order = 2;
+    test_obj->data_overlay.alpha = 0x80;
+    test_obj->data_overlay.transp_mask = 0xffe0;
+    test_obj->data_overlay.flags = MDP_FLIP_LR | MDP_FLIP_UD;
+
+    // Map and clear FB portion
+    fb_base = mmap(0,
+                   test_obj->slice_size,
+                   PROT_WRITE,
+                   MAP_SHARED,
+                   test_obj->fb_fd,
+                   0);
+    if ( MAP_FAILED == fb_base ) {
+        CDBG_ERROR("%s: Error while memory mapping frame buffer: %s",
+                   __func__,
+                   strerror(errno));
+        rc = -errno;
+        goto FAIL;
+    }
+
+    memset(fb_base, 0, test_obj->slice_size);
+
+    if (ioctl(test_obj->fb_fd, FBIOPAN_DISPLAY, &test_obj->vinfo) < 0) {
+        CDBG_ERROR("%s : FBIOPAN_DISPLAY failed!", __func__);
+        rc = -errno;
+        goto FAIL;
+    }
+
+    munmap(fb_base, test_obj->slice_size);
+    test_obj->data_overlay.id = (uint32_t)MSMFB_NEW_REQUEST;
+    rc = ioctl(test_obj->fb_fd, MSMFB_OVERLAY_SET, &test_obj->data_overlay);
+    if (rc < 0) {
+        CDBG_ERROR("%s : MSMFB_OVERLAY_SET failed! err=%d\n",
+                   __func__,
+                   rc);
+        return MM_CAMERA_E_GENERAL;
+    }
+    CDBG_ERROR("%s: Overlay set with overlay id: %d", __func__, test_obj->data_overlay.id);
+
+    return rc;
+
+FAIL:
+
+    if ( 0 < test_obj->fb_fd ) {
+        close(test_obj->fb_fd);
+    }
+
+    return rc;
+}
+
+int mm_app_close_fb(mm_camera_test_obj_t *test_obj)
+{
+    int rc = MM_CAMERA_OK;
+
+    assert( ( NULL != test_obj ) && ( 0 < test_obj->fb_fd ) );
+
+    if (ioctl(test_obj->fb_fd, MSMFB_OVERLAY_UNSET, &test_obj->data_overlay.id)) {
+        CDBG_ERROR("\nERROR! MSMFB_OVERLAY_UNSET failed! (Line %d)\n", __LINE__);
+    }
+
+    if (ioctl(test_obj->fb_fd, FBIOPAN_DISPLAY, &test_obj->vinfo) < 0) {
+        CDBG_ERROR("ERROR: FBIOPAN_DISPLAY failed! line=%d\n", __LINE__);
+    }
+
+    close(test_obj->fb_fd);
+    test_obj->fb_fd = -1;
+
+    return rc;
+}
+
+void memset16(void *pDst, uint16_t value, int count)
+{
+    uint16_t *ptr = pDst;
+    while (count--)
+        *ptr++ = value;
+}
+
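+/*
+ * Display one camera buffer (by its memory fd) through the overlay set up in
+ * mm_app_initialize_fb(): MSMFB_OVERLAY_PLAY queues the buffer and
+ * FBIOPAN_DISPLAY flips it onto the panel.
+ */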
+int mm_app_overlay_display(mm_camera_test_obj_t *test_obj, int bufferFd)
+{
+    int rc = MM_CAMERA_OK;
+    struct msmfb_overlay_data ovdata;
+
+
+    memset(&ovdata, 0, sizeof(struct msmfb_overlay_data));
+    ovdata.id = test_obj->data_overlay.id;
+    ovdata.data.memory_id = bufferFd;
+
+    if (ioctl(test_obj->fb_fd, MSMFB_OVERLAY_PLAY, &ovdata)) {
+        CDBG_ERROR("%s : MSMFB_OVERLAY_PLAY failed!", __func__);
+        return MM_CAMERA_E_GENERAL;
+    }
+
+    if (ioctl(test_obj->fb_fd, FBIOPAN_DISPLAY, &test_obj->vinfo) < 0) {
+        CDBG_ERROR("%s : FBIOPAN_DISPLAY failed!", __func__);
+        return MM_CAMERA_E_GENERAL;
+    }
+
+    return rc;
+}
diff --git a/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_queue.c b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_queue.c
new file mode 100644
index 0000000..0d49624
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_queue.c
@@ -0,0 +1,167 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#include "mm_qcamera_dbg.h"
+#include "mm_qcamera_app.h"
+
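+/*
+ * Minimal mutex-protected FIFO used by the test app to hand super buffers
+ * from the capture callbacks to the reprocess path. Typical lifecycle
+ * (illustrative sketch only, error handling omitted):
+ *
+ *   mm_camera_queue_t q;
+ *   mm_camera_queue_init(&q, release_cb, user_data);
+ *   mm_qcamera_queue_enqueue(&q, frame);           // producer side
+ *   void *f = mm_qcamera_queue_dequeue(&q, 1);     // consumer, from head
+ *   mm_qcamera_queue_release(&q);                  // flush and destroy lock
+ */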
+int mm_camera_queue_init(mm_camera_queue_t *queue,
+                         release_data_fn data_rel_fn,
+                         void *user_data)
+{
+    if ( NULL == queue ) {
+        return -1;
+    }
+
+    pthread_mutex_init(&queue->m_lock, NULL);
+    cam_list_init(&queue->m_head.list);
+    queue->m_size = 0;
+    queue->m_dataFn = data_rel_fn;
+    queue->m_userData = user_data;
+
+    return MM_CAMERA_OK;
+}
+
+int mm_qcamera_queue_release(mm_camera_queue_t *queue)
+{
+    if ( NULL == queue ) {
+        return -1;
+    }
+
+    mm_qcamera_queue_flush(queue);
+    pthread_mutex_destroy(&queue->m_lock);
+
+    return MM_CAMERA_OK;
+}
+
+int mm_qcamera_queue_isempty(mm_camera_queue_t *queue)
+{
+    if ( NULL == queue ) {
+        return 0;
+    }
+
+    int flag = 1;
+    pthread_mutex_lock(&queue->m_lock);
+    if (queue->m_size > 0) {
+        flag = 0;
+    }
+    pthread_mutex_unlock(&queue->m_lock);
+
+    return flag;
+}
+
+int mm_qcamera_queue_enqueue(mm_camera_queue_t *queue, void *data)
+{
+    if ( NULL == queue ) {
+        return -1;
+    }
+
+    camera_q_node *node =
+        (camera_q_node *)malloc(sizeof(camera_q_node));
+    if (NULL == node) {
+        CDBG_ERROR("%s: No memory for camera_q_node", __func__);
+        return 0;
+    }
+
+    memset(node, 0, sizeof(camera_q_node));
+    node->data = data;
+
+    pthread_mutex_lock(&queue->m_lock);
+    cam_list_add_tail_node(&node->list, &queue->m_head.list);
+    queue->m_size++;
+    pthread_mutex_unlock(&queue->m_lock);
+
+    return 1;
+}
+
+void* mm_qcamera_queue_dequeue(mm_camera_queue_t *queue, int bFromHead)
+{
+    if ( NULL == queue ) {
+        return NULL;
+    }
+
+    camera_q_node* node = NULL;
+    void* data = NULL;
+    struct cam_list *head = NULL;
+    struct cam_list *pos = NULL;
+
+    pthread_mutex_lock(&queue->m_lock);
+    head = &queue->m_head.list;
+    if (bFromHead) {
+        pos = head->next;
+    } else {
+        pos = head->prev;
+    }
+    if (pos != head) {
+        node = member_of(pos, camera_q_node, list);
+        cam_list_del_node(&node->list);
+        queue->m_size--;
+    }
+    pthread_mutex_unlock(&queue->m_lock);
+
+    if (NULL != node) {
+        data = node->data;
+        free(node);
+    }
+
+    return data;
+}
+
+void mm_qcamera_queue_flush(mm_camera_queue_t *queue)
+{
+    camera_q_node* node = NULL;
+    struct cam_list *head = NULL;
+    struct cam_list *pos = NULL;
+
+    if ( NULL == queue ) {
+        return;
+    }
+
+    pthread_mutex_lock(&queue->m_lock);
+    head = &queue->m_head.list;
+    pos = head->next;
+
+    while(pos != head) {
+        node = member_of(pos, camera_q_node, list);
+        pos = pos->next;
+        cam_list_del_node(&node->list);
+        queue->m_size--;
+
+        if (NULL != node->data) {
+            if (queue->m_dataFn) {
+                queue->m_dataFn(node->data, queue->m_userData);
+            }
+            free(node->data);
+        }
+        free(node);
+
+    }
+    queue->m_size = 0;
+    pthread_mutex_unlock(&queue->m_lock);
+}
+
diff --git a/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_rdi.c b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_rdi.c
new file mode 100644
index 0000000..a5eff0d
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_rdi.c
@@ -0,0 +1,316 @@
+/*
+Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+    * Neither the name of The Linux Foundation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "mm_qcamera_dbg.h"
+#include "mm_qcamera_app.h"
+
+static uint32_t rdi_len = 0;
+
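+/*
+ * Dump one RDI frame to QCAMERA_DUMP_FRM_LOCATION: rdi_len bytes are written
+ * per plane, starting at each plane's data_offset, into a single
+ * <name>_<idx>.<ext> file.
+ */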
+static void mm_app_rdi_dump_frame(mm_camera_buf_def_t *frame,
+                                  char *name,
+                                  char *ext,
+                                  uint32_t frame_idx)
+{
+    char file_name[FILENAME_MAX];
+    int file_fd;
+    int i;
+
+    if (frame != NULL) {
+        snprintf(file_name, sizeof(file_name),
+            QCAMERA_DUMP_FRM_LOCATION"%s_%03u.%s", name, frame_idx, ext);
+        file_fd = open(file_name, O_RDWR | O_CREAT, 0777);
+        if (file_fd < 0) {
+            CDBG_ERROR("%s: cannot open file %s \n", __func__, file_name);
+        } else {
+            for (i = 0; i < frame->planes_buf.num_planes; i++) {
+                write(file_fd,
+                      (uint8_t *)frame->buffer + frame->planes_buf.planes[i].data_offset,
+                      rdi_len);
+            }
+
+            close(file_fd);
+            CDBG("%s: dump rdi frame %s", __func__,file_name);
+        }
+    }
+}
+
+static void mm_app_rdi_notify_cb(mm_camera_super_buf_t *bufs,
+                                 void *user_data)
+{
+    char file_name[FILENAME_MAX];
+    mm_camera_buf_def_t *frame = bufs->bufs[0];
+    mm_camera_test_obj_t *pme = (mm_camera_test_obj_t *)user_data;
+
+    CDBG("%s: BEGIN - length=%zu, frame idx = %d stream_id=%d\n",
+         __func__, frame->frame_len, frame->frame_idx, frame->stream_id);
+    snprintf(file_name, sizeof(file_name), "RDI_dump_%d", pme->cam->camera_handle);
+    mm_app_rdi_dump_frame(frame, file_name, "raw", frame->frame_idx);
+
+    if (MM_CAMERA_OK != pme->cam->ops->qbuf(bufs->camera_handle,
+                                            bufs->ch_id,
+                                            frame)) {
+        CDBG_ERROR("%s: Failed in RDI Qbuf\n", __func__);
+    }
+    mm_app_cache_ops((mm_camera_app_meminfo_t *)frame->mem_info,
+                     ION_IOC_INV_CACHES);
+
+    CDBG("%s: END\n", __func__);
+}
+
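+/*
+ * RDI stream setup: walk the sensor's supported raw formats and keep the
+ * last Bayer MIPI (8-12bpp), meta or JPEG raw format reported, size the
+ * stream to the sensor's native raw_dim, and remember the resulting plane
+ * length in rdi_len for the dump helper above.
+ */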
+mm_camera_stream_t * mm_app_add_rdi_stream(mm_camera_test_obj_t *test_obj,
+                                               mm_camera_channel_t *channel,
+                                               mm_camera_buf_notify_t stream_cb,
+                                               void *userdata,
+                                               uint8_t num_bufs,
+                                               uint8_t num_burst)
+{
+    int rc = MM_CAMERA_OK;
+    size_t i;
+    mm_camera_stream_t *stream = NULL;
+    cam_capability_t *cam_cap = (cam_capability_t *)(test_obj->cap_buf.buf.buffer);
+    cam_format_t fmt = CAM_FORMAT_MAX;
+    cam_stream_buf_plane_info_t *buf_planes;
+
+    stream = mm_app_add_stream(test_obj, channel);
+    if (NULL == stream) {
+        CDBG_ERROR("%s: add stream failed\n", __func__);
+        return NULL;
+    }
+
+    CDBG_ERROR("%s: raw_dim w:%d height:%d\n", __func__, cam_cap->raw_dim[0].width, cam_cap->raw_dim[0].height);
+    for (i = 0;i < cam_cap->supported_raw_fmt_cnt;i++) {
+        CDBG_ERROR("%s: supported_raw_fmts[%zd]=%d\n", __func__,
+            i, (int)cam_cap->supported_raw_fmts[i]);
+        if (((CAM_FORMAT_BAYER_MIPI_RAW_8BPP_GBRG <= cam_cap->supported_raw_fmts[i]) &&
+            (CAM_FORMAT_BAYER_MIPI_RAW_12BPP_BGGR >= cam_cap->supported_raw_fmts[i])) ||
+            (cam_cap->supported_raw_fmts[i] == CAM_FORMAT_META_RAW_8BIT) ||
+            (cam_cap->supported_raw_fmts[i] == CAM_FORMAT_JPEG_RAW_8BIT))
+        {
+            fmt = cam_cap->supported_raw_fmts[i];
+            CDBG_ERROR("%s: fmt=%d\n", __func__, fmt);
+        }
+    }
+
+    if (CAM_FORMAT_MAX == fmt) {
+        CDBG_ERROR("%s: rdi format not supported\n", __func__);
+        return NULL;
+    }
+
+    stream->s_config.mem_vtbl.get_bufs = mm_app_stream_initbuf;
+    stream->s_config.mem_vtbl.put_bufs = mm_app_stream_deinitbuf;
+    stream->s_config.mem_vtbl.clean_invalidate_buf =
+      mm_app_stream_clean_invalidate_buf;
+    stream->s_config.mem_vtbl.invalidate_buf = mm_app_stream_invalidate_buf;
+    stream->s_config.mem_vtbl.user_data = (void *)stream;
+    stream->s_config.stream_cb = stream_cb;
+    stream->s_config.userdata = userdata;
+    stream->num_of_bufs = num_bufs;
+
+    stream->s_config.stream_info = (cam_stream_info_t *)stream->s_info_buf.buf.buffer;
+    memset(stream->s_config.stream_info, 0, sizeof(cam_stream_info_t));
+    stream->s_config.stream_info->stream_type = CAM_STREAM_TYPE_RAW;
+    if (num_burst == 0) {
+        stream->s_config.stream_info->streaming_mode = CAM_STREAMING_MODE_CONTINUOUS;
+    } else {
+        stream->s_config.stream_info->streaming_mode = CAM_STREAMING_MODE_BURST;
+        stream->s_config.stream_info->num_of_burst = num_burst;
+    }
+    stream->s_config.stream_info->fmt = fmt;
+    CDBG("%s: RAW: w: %d, h: %d ", __func__,
+       cam_cap->raw_dim[0].width, cam_cap->raw_dim[0].height);
+
+    stream->s_config.stream_info->dim.width = cam_cap->raw_dim[0].width;
+    stream->s_config.stream_info->dim.height = cam_cap->raw_dim[0].height;
+    stream->s_config.padding_info = cam_cap->padding_info;
+
+    rc = mm_app_config_stream(test_obj, channel, stream, &stream->s_config);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:config rdi stream err=%d\n", __func__, rc);
+        return NULL;
+    }
+
+    buf_planes = &stream->s_config.stream_info->buf_planes;
+    rdi_len = buf_planes->plane_info.mp[0].len;
+    CDBG("%s: plane_info %dx%d len:%d frame_len:%d\n", __func__,
+        buf_planes->plane_info.mp[0].stride, buf_planes->plane_info.mp[0].scanline,
+        buf_planes->plane_info.mp[0].len, buf_planes->plane_info.frame_len);
+
+    return stream;
+}
+
+mm_camera_stream_t * mm_app_add_rdi_snapshot_stream(mm_camera_test_obj_t *test_obj,
+                                                mm_camera_channel_t *channel,
+                                                mm_camera_buf_notify_t stream_cb,
+                                                void *userdata,
+                                                uint8_t num_bufs,
+                                                uint8_t num_burst)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_stream_t *stream = NULL;
+    cam_capability_t *cam_cap = (cam_capability_t *)(test_obj->cap_buf.buf.buffer);
+
+    stream = mm_app_add_stream(test_obj, channel);
+    if (NULL == stream) {
+        CDBG_ERROR("%s: add stream failed\n", __func__);
+        return NULL;
+    }
+
+    stream->s_config.mem_vtbl.get_bufs = mm_app_stream_initbuf;
+    stream->s_config.mem_vtbl.put_bufs = mm_app_stream_deinitbuf;
+    stream->s_config.mem_vtbl.clean_invalidate_buf =
+      mm_app_stream_clean_invalidate_buf;
+    stream->s_config.mem_vtbl.invalidate_buf = mm_app_stream_invalidate_buf;
+    stream->s_config.mem_vtbl.user_data = (void *)stream;
+    stream->s_config.stream_cb = stream_cb;
+    stream->s_config.userdata = userdata;
+    stream->num_of_bufs = num_bufs;
+
+    stream->s_config.stream_info = (cam_stream_info_t *)stream->s_info_buf.buf.buffer;
+    memset(stream->s_config.stream_info, 0, sizeof(cam_stream_info_t));
+    stream->s_config.stream_info->stream_type = CAM_STREAM_TYPE_SNAPSHOT;
+    if (num_burst == 0) {
+        stream->s_config.stream_info->streaming_mode = CAM_STREAMING_MODE_CONTINUOUS;
+    } else {
+        stream->s_config.stream_info->streaming_mode = CAM_STREAMING_MODE_BURST;
+        stream->s_config.stream_info->num_of_burst = num_burst;
+    }
+    stream->s_config.stream_info->fmt = DEFAULT_SNAPSHOT_FORMAT;
+    stream->s_config.stream_info->dim.width = DEFAULT_SNAPSHOT_WIDTH;
+    stream->s_config.stream_info->dim.height = DEFAULT_SNAPSHOT_HEIGHT;
+    stream->s_config.padding_info = cam_cap->padding_info;
+
+    rc = mm_app_config_stream(test_obj, channel, stream, &stream->s_config);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:config rdi stream err=%d\n", __func__, rc);
+        return NULL;
+    }
+
+    return stream;
+}
+
+mm_camera_channel_t * mm_app_add_rdi_channel(mm_camera_test_obj_t *test_obj, uint8_t num_burst)
+{
+    mm_camera_channel_t *channel = NULL;
+    mm_camera_stream_t *stream = NULL;
+
+    channel = mm_app_add_channel(test_obj,
+                                 MM_CHANNEL_TYPE_RDI,
+                                 NULL,
+                                 NULL,
+                                 NULL);
+    if (NULL == channel) {
+        CDBG_ERROR("%s: add channel failed", __func__);
+        return NULL;
+    }
+
+    stream = mm_app_add_rdi_stream(test_obj,
+                                       channel,
+                                       mm_app_rdi_notify_cb,
+                                       (void *)test_obj,
+                                       RDI_BUF_NUM,
+                                       num_burst);
+    if (NULL == stream) {
+        CDBG_ERROR("%s: add stream failed\n", __func__);
+        mm_app_del_channel(test_obj, channel);
+        return NULL;
+    }
+
+    CDBG("%s: channel=%d stream=%d\n", __func__, channel->ch_id, stream->s_id);
+    return channel;
+}
+
+int mm_app_stop_and_del_rdi_channel(mm_camera_test_obj_t *test_obj,
+                                mm_camera_channel_t *channel)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_stream_t *stream = NULL;
+    uint8_t i;
+
+    rc = mm_app_stop_channel(test_obj, channel);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:Stop RDI failed rc=%d\n", __func__, rc);
+    }
+
+    if (channel->num_streams <= MAX_STREAM_NUM_IN_BUNDLE) {
+        for (i = 0; i < channel->num_streams; i++) {
+            stream = &channel->streams[i];
+            rc = mm_app_del_stream(test_obj, channel, stream);
+            if (MM_CAMERA_OK != rc) {
+                CDBG_ERROR("%s:del stream(%d) failed rc=%d\n", __func__, i, rc);
+            }
+        }
+    } else {
+        CDBG_ERROR("%s: num_streams = %d. Should not be more than %d\n",
+            __func__, channel->num_streams, MAX_STREAM_NUM_IN_BUNDLE);
+    }
+
+    rc = mm_app_del_channel(test_obj, channel);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:delete channel failed rc=%d\n", __func__, rc);
+    }
+
+    return rc;
+}
+
+int mm_app_start_rdi(mm_camera_test_obj_t *test_obj, uint8_t num_burst)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_channel_t *channel = NULL;
+
+    channel = mm_app_add_rdi_channel(test_obj, num_burst);
+    if (NULL == channel) {
+        CDBG_ERROR("%s: add channel failed", __func__);
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    rc = mm_app_start_channel(test_obj, channel);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:start rdi failed rc=%d\n", __func__, rc);
+        mm_app_del_channel(test_obj, channel);
+        return rc;
+    }
+
+    return rc;
+}
+
+int mm_app_stop_rdi(mm_camera_test_obj_t *test_obj)
+{
+    int rc = MM_CAMERA_OK;
+
+    mm_camera_channel_t *channel =
+        mm_app_get_channel_by_type(test_obj, MM_CHANNEL_TYPE_RDI);
+
+    rc = mm_app_stop_and_del_rdi_channel(test_obj, channel);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:Stop RDI failed rc=%d\n", __func__, rc);
+    }
+
+    return rc;
+}
+
diff --git a/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_reprocess.c b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_reprocess.c
new file mode 100644
index 0000000..2c097c9
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_reprocess.c
@@ -0,0 +1,361 @@
+/*
+Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+    * Neither the name of The Linux Foundation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "mm_qcamera_dbg.h"
+#include "mm_qcamera_app.h"
+
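+/*
+ * Reprocess output callback: the reprocess channel carries a single stream,
+ * so bufs->bufs[0] is the processed frame. Depending on encodeJpeg the frame
+ * is either fed into a fresh JPEG encoding session or requeued, and the
+ * original source super buffer is dequeued from pp_frames and released back
+ * to the driver.
+ */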
+static void mm_app_reprocess_notify_cb(mm_camera_super_buf_t *bufs,
+                                   void *user_data)
+{
+    mm_camera_buf_def_t *frame = bufs->bufs[0];
+    mm_camera_test_obj_t *pme = (mm_camera_test_obj_t *)user_data;
+    mm_camera_channel_t *channel = NULL;
+    mm_camera_stream_t *m_stream = NULL;
+    mm_camera_buf_def_t *m_frame = NULL;
+    mm_camera_super_buf_t *src_frame;
+    int i = 0;
+    int rc = 0;
+
+    CDBG_ERROR("%s: BEGIN - length=%zu, frame idx = %d\n",
+         __func__, frame->frame_len, frame->frame_idx);
+
+    /* find channel */
+    for (i = 0; i < MM_CHANNEL_TYPE_MAX; i++) {
+        if (pme->channels[i].ch_id == bufs->ch_id) {
+            channel = &pme->channels[i];
+            break;
+        }
+    }
+    if (NULL == channel) {
+        CDBG_ERROR("%s: Wrong channel id (%d)", __func__, bufs->ch_id);
+        return;
+    }
+
+    // We have only one stream and buffer
+    // in the reprocess channel.
+    m_stream = &channel->streams[0];
+    m_frame = bufs->bufs[0];
+
+    if ( pme->encodeJpeg ) {
+        pme->jpeg_buf.buf.buffer = (uint8_t *)malloc(m_frame->frame_len);
+        if ( NULL == pme->jpeg_buf.buf.buffer ) {
+            CDBG_ERROR("%s: error allocating jpeg output buffer", __func__);
+            goto exit;
+        }
+
+        pme->jpeg_buf.buf.frame_len = m_frame->frame_len;
+        /* create a new jpeg encoding session */
+        rc = createEncodingSession(pme, m_stream, m_frame);
+        if (0 != rc) {
+            CDBG_ERROR("%s: error creating jpeg session", __func__);
+            free(pme->jpeg_buf.buf.buffer);
+            goto exit;
+        }
+
+        /* start jpeg encoding job */
+        CDBG_ERROR("Encoding reprocessed frame!!");
+        rc = encodeData(pme, bufs, m_stream);
+        pme->encodeJpeg = 0;
+    } else {
+        if (MM_CAMERA_OK != pme->cam->ops->qbuf(bufs->camera_handle,
+                                                bufs->ch_id,
+                                                frame)) {
+            CDBG_ERROR("%s: Failed in Reprocess Qbuf\n", __func__);
+        }
+        mm_app_cache_ops((mm_camera_app_meminfo_t *)frame->mem_info,
+                         ION_IOC_INV_CACHES);
+    }
+
+exit:
+
+    // Release source frame
+    src_frame = ( mm_camera_super_buf_t * ) mm_qcamera_queue_dequeue(&pme->pp_frames, 1);
+    if ( NULL != src_frame ) {
+        mm_app_release_ppinput((void *) src_frame, (void *) pme);
+    }
+
+    CDBG_ERROR("%s: END\n", __func__);
+}
+
+mm_camera_stream_t * mm_app_add_reprocess_stream_from_source(mm_camera_test_obj_t *test_obj,
+                                                             mm_camera_channel_t *channel,
+                                                             mm_camera_stream_t *source,
+                                                             mm_camera_buf_notify_t stream_cb,
+                                                             cam_pp_feature_config_t pp_config,
+                                                             void *userdata,
+                                                             uint8_t num_bufs)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_stream_t *stream = NULL;
+    cam_capability_t *cam_cap = NULL;
+    cam_stream_info_t *source_stream_info;
+
+    if ( ( NULL == test_obj ) ||
+         ( NULL == channel ) ||
+         ( NULL == source ) ) {
+        CDBG_ERROR("%s: Invalid input\n", __func__);
+        return NULL;
+    }
+
+    cam_cap = (cam_capability_t *)(test_obj->cap_buf.buf.buffer);
+
+    stream = mm_app_add_stream(test_obj, channel);
+    if (NULL == stream) {
+        CDBG_ERROR("%s: add stream failed\n", __func__);
+        return NULL;
+    }
+
+    stream->s_config.mem_vtbl.get_bufs = mm_app_stream_initbuf;
+    stream->s_config.mem_vtbl.put_bufs = mm_app_stream_deinitbuf;
+    stream->s_config.mem_vtbl.clean_invalidate_buf =
+      mm_app_stream_clean_invalidate_buf;
+    stream->s_config.mem_vtbl.invalidate_buf = mm_app_stream_invalidate_buf;
+    stream->s_config.mem_vtbl.user_data = (void *)stream;
+    stream->s_config.stream_cb = stream_cb;
+    stream->s_config.userdata = userdata;
+    stream->num_of_bufs = num_bufs;
+
+    stream->s_config.stream_info = (cam_stream_info_t *)stream->s_info_buf.buf.buffer;
+    source_stream_info = (cam_stream_info_t *) source->s_info_buf.buf.buffer;
+    memset(stream->s_config.stream_info, 0, sizeof(cam_stream_info_t));
+    stream->s_config.stream_info->stream_type = CAM_STREAM_TYPE_OFFLINE_PROC;
+    stream->s_config.stream_info->streaming_mode = CAM_STREAMING_MODE_CONTINUOUS;
+    stream->s_config.stream_info->fmt = source_stream_info->fmt;
+    stream->s_config.stream_info->dim = source_stream_info->dim;
+    stream->s_config.padding_info = cam_cap->padding_info;
+
+
+    stream->s_config.stream_info->reprocess_config.pp_type = CAM_ONLINE_REPROCESS_TYPE;
+    stream->s_config.stream_info->reprocess_config.online.input_stream_id = source->s_config.stream_info->stream_svr_id;
+    stream->s_config.stream_info->reprocess_config.online.input_stream_type = source->s_config.stream_info->stream_type;
+    stream->s_config.stream_info->reprocess_config.pp_feature_config = pp_config;
+
+    rc = mm_app_config_stream(test_obj, channel, stream, &stream->s_config);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:config preview stream err=%d\n", __func__, rc);
+        return NULL;
+    }
+
+    return stream;
+}
+
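+/*
+ * Build a reprocess channel on top of an existing source stream. The
+ * post-processing feature mask is derived from the test object: sharpness
+ * (when the capability flags it as required), 2D wavelet denoise and CAC.
+ * The offline stream inherits format and dimensions from the source stream.
+ */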
+mm_camera_channel_t * mm_app_add_reprocess_channel(mm_camera_test_obj_t *test_obj,
+                                                   mm_camera_stream_t *source_stream)
+{
+    mm_camera_channel_t *channel = NULL;
+    mm_camera_stream_t *stream = NULL;
+
+    if ( NULL == source_stream ) {
+        CDBG_ERROR("%s: add reprocess stream failed\n", __func__);
+        return NULL;
+    }
+
+    channel = mm_app_add_channel(test_obj,
+                                 MM_CHANNEL_TYPE_REPROCESS,
+                                 NULL,
+                                 NULL,
+                                 NULL);
+    if (NULL == channel) {
+        CDBG_ERROR("%s: add channel failed", __func__);
+        return NULL;
+    }
+
+    // pp feature config
+    cam_pp_feature_config_t pp_config;
+    memset(&pp_config, 0, sizeof(cam_pp_feature_config_t));
+
+    cam_capability_t *caps = ( cam_capability_t * ) ( test_obj->cap_buf.buf.buffer );
+    if (caps->min_required_pp_mask & CAM_QCOM_FEATURE_SHARPNESS) {
+        pp_config.feature_mask |= CAM_QCOM_FEATURE_SHARPNESS;
+        pp_config.sharpness = test_obj->reproc_sharpness;
+    }
+
+    if (test_obj->reproc_wnr.denoise_enable) {
+        pp_config.feature_mask |= CAM_QCOM_FEATURE_DENOISE2D;
+        pp_config.denoise2d = test_obj->reproc_wnr;
+    }
+
+    if (test_obj->enable_CAC) {
+        pp_config.feature_mask |= CAM_QCOM_FEATURE_CAC;
+    }
+
+    uint8_t minStreamBufNum = source_stream->num_of_bufs;
+    stream = mm_app_add_reprocess_stream_from_source(test_obj,
+                                     channel,
+                                     source_stream,
+                                     mm_app_reprocess_notify_cb,
+                                     pp_config,
+                                     (void *)test_obj,
+                                     minStreamBufNum);
+    if (NULL == stream) {
+        CDBG_ERROR("%s: add reprocess stream failed\n", __func__);
+        mm_app_del_channel(test_obj, channel);
+        return NULL;
+    }
+    test_obj->reproc_stream = stream;
+
+    return channel;
+}
+
+int mm_app_start_reprocess(mm_camera_test_obj_t *test_obj)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_channel_t *r_ch = NULL;
+
+    mm_camera_queue_init(&test_obj->pp_frames,
+                         mm_app_release_ppinput,
+                         ( void * ) test_obj);
+
+    r_ch = mm_app_get_channel_by_type(test_obj, MM_CHANNEL_TYPE_REPROCESS);
+    if (NULL == r_ch) {
+        CDBG_ERROR("%s: No initialized reprocess channel\n", __func__);
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    rc = mm_app_start_channel(test_obj, r_ch);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:start reprocess failed rc=%d\n", __func__, rc);
+        mm_app_del_channel(test_obj, r_ch);
+        return rc;
+    }
+
+    return rc;
+}
+
+int mm_app_stop_reprocess(mm_camera_test_obj_t *test_obj)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_channel_t *r_ch = NULL;
+
+    r_ch = mm_app_get_channel_by_type(test_obj, MM_CHANNEL_TYPE_REPROCESS);
+    if (NULL == r_ch) {
+        CDBG_ERROR("%s: No initialized reprocess channel\n", __func__);
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    rc = mm_app_stop_and_del_channel(test_obj, r_ch);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:Stop Preview failed rc=%d\n", __func__, rc);
+    }
+
+    mm_qcamera_queue_release(&test_obj->pp_frames);
+    test_obj->reproc_stream = NULL;
+
+    return rc;
+}
+
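+/*
+ * Kick off one reprocess job: the source super buffer is queued on
+ * pp_frames so it can be released once the output arrives, then a
+ * CAM_STREAM_PARAM_TYPE_DO_REPROCESS stream parameter is issued carrying the
+ * input buffer/frame index and, when available, the metadata stream handle
+ * and buffer index.
+ */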
+int mm_app_do_reprocess(mm_camera_test_obj_t *test_obj,
+                        mm_camera_buf_def_t *frame,
+                        uint32_t meta_idx,
+                        mm_camera_super_buf_t *super_buf,
+                        mm_camera_stream_t *src_meta)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_channel_t *r_ch = NULL;
+    mm_camera_super_buf_t *src_buf = NULL;
+
+    if ( ( NULL == test_obj ) ||
+         ( NULL == frame ) ||
+         ( NULL == super_buf )) {
+        CDBG_ERROR("%s: Invalid input\n", __func__);
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    if ( NULL == test_obj->reproc_stream ) {
+        CDBG_ERROR("%s: No reprocess stream\n", __func__);
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    r_ch = mm_app_get_channel_by_type(test_obj, MM_CHANNEL_TYPE_REPROCESS);
+    if (NULL == r_ch) {
+        CDBG_ERROR("%s: No reprocess channel\n", __func__);
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    src_buf = ( mm_camera_super_buf_t * ) malloc(sizeof(mm_camera_super_buf_t));
+    if ( NULL == src_buf ) {
+        CDBG_ERROR("%s: No resources for src frame rc=%d\n",
+                    __func__,
+                    rc);
+        return -1;
+    }
+    memcpy(src_buf, super_buf, sizeof(mm_camera_super_buf_t));
+    mm_qcamera_queue_enqueue(&test_obj->pp_frames, src_buf);
+
+    cam_stream_parm_buffer_t param;
+    memset(&param, 0, sizeof(cam_stream_parm_buffer_t));
+    param.type = CAM_STREAM_PARAM_TYPE_DO_REPROCESS;
+    param.reprocess.buf_index = frame->buf_idx;
+    param.reprocess.frame_idx = frame->frame_idx;
+    if (src_meta != NULL) {
+        param.reprocess.meta_present = 1;
+        param.reprocess.meta_stream_handle = src_meta->s_config.stream_info->stream_svr_id;
+        param.reprocess.meta_buf_index = meta_idx;
+    } else {
+        CDBG_ERROR("%s: No metadata source stream rc=%d\n",
+                   __func__,
+                   rc);
+    }
+
+    test_obj->reproc_stream->s_config.stream_info->parm_buf = param;
+    rc = test_obj->cam->ops->set_stream_parms(test_obj->cam->camera_handle,
+                                              r_ch->ch_id,
+                                              test_obj->reproc_stream->s_id,
+                                              &test_obj->reproc_stream->s_config.stream_info->parm_buf);
+
+    return rc;
+}
+
+void mm_app_release_ppinput(void *data, void *user_data)
+{
+    uint32_t i = 0;
+    mm_camera_super_buf_t *recvd_frame  = ( mm_camera_super_buf_t * ) data;
+    mm_camera_test_obj_t *pme = (mm_camera_test_obj_t *)user_data;
+
+    for ( i = 0 ; i < recvd_frame->num_bufs ; i++) {
+        if (MM_CAMERA_OK != pme->cam->ops->qbuf(pme->cam->camera_handle,
+                                                recvd_frame->ch_id,
+                                                recvd_frame->bufs[i])) {
+            CDBG_ERROR("%s: Failed in Qbuf\n", __func__);
+        }
+        mm_app_cache_ops((mm_camera_app_meminfo_t *) recvd_frame->bufs[i]->mem_info,
+                         ION_IOC_INV_CACHES);
+    }
+}
+
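Taken together, the helpers in this file form a small lifecycle: build the reprocess channel from a source stream, start it, submit source frames through mm_app_do_reprocess() as they arrive, and tear everything down with mm_app_stop_reprocess(). A minimal sketch of that ordering is shown below; it assumes the reprocess channel has already been created by the channel-setup helper whose tail appears above, and super_buf/meta_stream are placeholder names for whatever the caller's source-stream callback delivered.

    /* Sketch only: one pass through the reprocess lifecycle.
     * super_buf and meta_stream stand in for the data received by the
     * caller's source-stream callback; they are not defined in this file. */
    static int reprocess_one_frame(mm_camera_test_obj_t *test_obj,
                                   mm_camera_super_buf_t *super_buf,
                                   mm_camera_stream_t *meta_stream,
                                   uint32_t meta_idx)
    {
        int rc = mm_app_start_reprocess(test_obj);
        if (MM_CAMERA_OK != rc) {
            return rc;
        }
        /* Feed the first source buffer to the reprocess stream; the output
         * is delivered asynchronously to mm_app_reprocess_notify_cb. */
        rc = mm_app_do_reprocess(test_obj, super_buf->bufs[0], meta_idx,
                                 super_buf, meta_stream);
        if (MM_CAMERA_OK != rc) {
            mm_app_stop_reprocess(test_obj);
            return rc;
        }
        /* A real test would wait for the output (e.g. mm_camera_app_wait())
         * before stopping the channel. */
        return mm_app_stop_reprocess(test_obj);
    }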
diff --git a/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_snapshot.c b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_snapshot.c
new file mode 100644
index 0000000..b644e03
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_snapshot.c
@@ -0,0 +1,705 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "mm_qcamera_dbg.h"
+#include "mm_qcamera_app.h"
+
+/* This callback is received once the complete JPEG encoding is done */
+static void jpeg_encode_cb(jpeg_job_status_t status,
+                           uint32_t client_hdl,
+                           uint32_t jobId,
+                           mm_jpeg_output_t *p_buf,
+                           void *userData)
+{
+    uint32_t i = 0;
+    mm_camera_test_obj_t *pme = NULL;
+    CDBG("%s: BEGIN\n", __func__);
+
+    pme = (mm_camera_test_obj_t *)userData;
+    if (pme->jpeg_hdl != client_hdl ||
+        jobId != pme->current_job_id ||
+        !pme->current_job_frames) {
+        CDBG_ERROR("%s: NULL current job frames or not matching job ID (%d, %d)",
+                   __func__, jobId, pme->current_job_id);
+        return;
+    }
+
+    /* dump jpeg img */
+    CDBG_ERROR("%s: job %d, status=%d", __func__, jobId, status);
+    if (status == JPEG_JOB_STATUS_DONE && p_buf != NULL) {
+        mm_app_dump_jpeg_frame(p_buf->buf_vaddr, p_buf->buf_filled_len, "jpeg", "jpg", jobId);
+    }
+
+    /* buf done current encoding frames */
+    pme->current_job_id = 0;
+    for (i = 0; i < pme->current_job_frames->num_bufs; i++) {
+        if (MM_CAMERA_OK != pme->cam->ops->qbuf(pme->current_job_frames->camera_handle,
+                                                pme->current_job_frames->ch_id,
+                                                pme->current_job_frames->bufs[i])) {
+            CDBG_ERROR("%s: Failed in Qbuf\n", __func__);
+        }
+        mm_app_cache_ops((mm_camera_app_meminfo_t *) pme->current_job_frames->bufs[i]->mem_info,
+                         ION_IOC_INV_CACHES);
+    }
+
+    free(pme->jpeg_buf.buf.buffer);
+    free(pme->current_job_frames);
+    pme->current_job_frames = NULL;
+
+    /* signal snapshot is done */
+    mm_camera_app_done();
+}
+
+int encodeData(mm_camera_test_obj_t *test_obj, mm_camera_super_buf_t* recvd_frame,
+               mm_camera_stream_t *m_stream)
+{
+    cam_capability_t *cam_cap = (cam_capability_t *)(test_obj->cap_buf.buf.buffer);
+
+    int rc = -MM_CAMERA_E_GENERAL;
+    mm_jpeg_job_t job;
+
+    /* remember current frames being encoded */
+    test_obj->current_job_frames =
+        (mm_camera_super_buf_t *)malloc(sizeof(mm_camera_super_buf_t));
+    if (!test_obj->current_job_frames) {
+        CDBG_ERROR("%s: No memory for current_job_frames", __func__);
+        return rc;
+    }
+    *(test_obj->current_job_frames) = *recvd_frame;
+
+    memset(&job, 0, sizeof(job));
+    job.job_type = JPEG_JOB_TYPE_ENCODE;
+    job.encode_job.session_id = test_obj->current_jpeg_sess_id;
+
+    // TODO: Rotation should be set according to
+    //       sensor&device orientation
+    job.encode_job.rotation = 0;
+    if (cam_cap->position == CAM_POSITION_BACK) {
+        job.encode_job.rotation = 270;
+    }
+
+    /* fill in main src img encode param */
+    job.encode_job.main_dim.src_dim = m_stream->s_config.stream_info->dim;
+    job.encode_job.main_dim.dst_dim = m_stream->s_config.stream_info->dim;
+    job.encode_job.src_index = 0;
+
+    job.encode_job.thumb_dim.src_dim = m_stream->s_config.stream_info->dim;
+    job.encode_job.thumb_dim.dst_dim.width = DEFAULT_PREVIEW_WIDTH;
+    job.encode_job.thumb_dim.dst_dim.height = DEFAULT_PREVIEW_HEIGHT;
+
+    /* fill in sink img param */
+    job.encode_job.dst_index = 0;
+
+    if (test_obj->metadata != NULL) {
+        job.encode_job.p_metadata = test_obj->metadata;
+    } else {
+        CDBG_ERROR("%s: Metadata null, not set for jpeg encoding", __func__);
+    }
+
+    rc = test_obj->jpeg_ops.start_job(&job, &test_obj->current_job_id);
+    if ( 0 != rc ) {
+        free(test_obj->current_job_frames);
+        test_obj->current_job_frames = NULL;
+    }
+
+    return rc;
+}
+
+int createEncodingSession(mm_camera_test_obj_t *test_obj,
+                          mm_camera_stream_t *m_stream,
+                          mm_camera_buf_def_t *m_frame)
+{
+    mm_jpeg_encode_params_t encode_param;
+
+    memset(&encode_param, 0, sizeof(mm_jpeg_encode_params_t));
+    encode_param.jpeg_cb = jpeg_encode_cb;
+    encode_param.userdata = (void*)test_obj;
+    encode_param.encode_thumbnail = 0;
+    encode_param.quality = 85;
+    encode_param.color_format = MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V2;
+    encode_param.thumb_color_format = MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V2;
+
+    /* fill in main src img encode param */
+    encode_param.num_src_bufs = 1;
+    encode_param.src_main_buf[0].index = 0;
+    encode_param.src_main_buf[0].buf_size = m_frame->frame_len;
+    encode_param.src_main_buf[0].buf_vaddr = (uint8_t *)m_frame->buffer;
+    encode_param.src_main_buf[0].fd = m_frame->fd;
+    encode_param.src_main_buf[0].format = MM_JPEG_FMT_YUV;
+    encode_param.src_main_buf[0].offset = m_stream->offset;
+
+    /* fill in sink img param */
+    encode_param.num_dst_bufs = 1;
+    encode_param.dest_buf[0].index = 0;
+    encode_param.dest_buf[0].buf_size = test_obj->jpeg_buf.buf.frame_len;
+    encode_param.dest_buf[0].buf_vaddr = (uint8_t *)test_obj->jpeg_buf.buf.buffer;
+    encode_param.dest_buf[0].fd = test_obj->jpeg_buf.buf.fd;
+    encode_param.dest_buf[0].format = MM_JPEG_FMT_YUV;
+
+    /* main dimension */
+    encode_param.main_dim.src_dim = m_stream->s_config.stream_info->dim;
+    encode_param.main_dim.dst_dim = m_stream->s_config.stream_info->dim;
+
+    return test_obj->jpeg_ops.create_session(test_obj->jpeg_hdl,
+                                             &encode_param,
+                                             &test_obj->current_jpeg_sess_id);
+}
+
+/** mm_app_snapshot_metadata_notify_cb
+ *  @bufs: Pointer to super buffer
+ *  @user_data: Pointer to user data
+ *
+ *  Caches the latest metadata frame for later JPEG encoding and
+ *  signals the app when an autofocus result is reported.
+ **/
+static void mm_app_snapshot_metadata_notify_cb(mm_camera_super_buf_t *bufs,
+  void *user_data)
+{
+  uint32_t i = 0;
+  mm_camera_channel_t *channel = NULL;
+  mm_camera_stream_t *p_stream = NULL;
+  mm_camera_test_obj_t *pme = (mm_camera_test_obj_t *)user_data;
+  mm_camera_buf_def_t *frame;
+  metadata_buffer_t *pMetadata;
+
+  if (NULL == bufs || NULL == user_data) {
+    CDBG_ERROR("%s: bufs or user_data are not valid ", __func__);
+    return;
+  }
+  frame = bufs->bufs[0];
+
+  /* find channel */
+  for (i = 0; i < MM_CHANNEL_TYPE_MAX; i++) {
+    if (pme->channels[i].ch_id == bufs->ch_id) {
+      channel = &pme->channels[i];
+      break;
+    }
+  }
+
+  if (NULL == channel) {
+    CDBG_ERROR("%s: Channel object is null", __func__);
+    return;
+  }
+
+  /* find meta stream */
+  for (i = 0; i < channel->num_streams; i++) {
+    if (channel->streams[i].s_config.stream_info->stream_type == CAM_STREAM_TYPE_METADATA) {
+      p_stream = &channel->streams[i];
+      break;
+    }
+  }
+
+  if (NULL == p_stream) {
+    CDBG_ERROR("%s: cannot find metadata stream", __func__);
+    return;
+  }
+
+  /* find meta frame */
+  for (i = 0; i < bufs->num_bufs; i++) {
+    if (bufs->bufs[i]->stream_id == p_stream->s_id) {
+      frame = bufs->bufs[i];
+      break;
+    }
+  }
+
+  if (!pme->metadata) {
+    /* The app will free the metadata, we don't need to bother here */
+    pme->metadata = malloc(sizeof(metadata_buffer_t));
+    if (NULL == pme->metadata) {
+        CDBG_ERROR("%s: malloc failed", __func__);
+        return;
+    }
+  }
+
+  memcpy(pme->metadata , frame->buffer, sizeof(metadata_buffer_t));
+
+  pMetadata = (metadata_buffer_t *)frame->buffer;
+
+  IF_META_AVAILABLE(cam_auto_focus_data_t, focus_data,
+        CAM_INTF_META_AUTOFOCUS_DATA, pMetadata) {
+    if (focus_data->focus_state == CAM_AF_FOCUSED) {
+      CDBG_ERROR("%s: AutoFocus Done Call Back Received\n",__func__);
+      mm_camera_app_done();
+    } else if (focus_data->focus_state == CAM_AF_NOT_FOCUSED) {
+      CDBG_ERROR("%s: AutoFocus failed\n",__func__);
+      mm_camera_app_done();
+    }
+  }
+
+  if (MM_CAMERA_OK != pme->cam->ops->qbuf(bufs->camera_handle,
+                                          bufs->ch_id,
+                                          frame)) {
+    CDBG_ERROR("%s: Failed in Preview Qbuf\n", __func__);
+  }
+  mm_app_cache_ops((mm_camera_app_meminfo_t *)frame->mem_info,
+                   ION_IOC_INV_CACHES);
+}
+
+static void mm_app_snapshot_notify_cb_raw(mm_camera_super_buf_t *bufs,
+                                          void *user_data)
+{
+
+    int rc;
+    uint32_t i = 0;
+    mm_camera_test_obj_t *pme = (mm_camera_test_obj_t *)user_data;
+    mm_camera_channel_t *channel = NULL;
+    mm_camera_stream_t *m_stream = NULL;
+    mm_camera_buf_def_t *m_frame = NULL;
+
+    CDBG("%s: BEGIN\n", __func__);
+
+    /* find channel */
+    for (i = 0; i < MM_CHANNEL_TYPE_MAX; i++) {
+        if (pme->channels[i].ch_id == bufs->ch_id) {
+            channel = &pme->channels[i];
+            break;
+        }
+    }
+    if (NULL == channel) {
+        CDBG_ERROR("%s: Wrong channel id (%d)", __func__, bufs->ch_id);
+        rc = -1;
+        goto EXIT;
+    }
+
+    /* find raw stream */
+    for (i = 0; i < channel->num_streams; i++) {
+        if (channel->streams[i].s_config.stream_info->stream_type == CAM_STREAM_TYPE_RAW) {
+            m_stream = &channel->streams[i];
+            break;
+        }
+    }
+    if (NULL == m_stream) {
+        CDBG_ERROR("%s: cannot find snapshot stream", __func__);
+        rc = -1;
+        goto EXIT;
+    }
+
+    /* find raw frame */
+    for (i = 0; i < bufs->num_bufs; i++) {
+        if (bufs->bufs[i]->stream_id == m_stream->s_id) {
+            m_frame = bufs->bufs[i];
+            break;
+        }
+    }
+    if (NULL == m_frame) {
+        CDBG_ERROR("%s: main frame is NULL", __func__);
+        rc = -1;
+        goto EXIT;
+    }
+
+    mm_app_dump_frame(m_frame, "main", "raw", m_frame->frame_idx);
+
+EXIT:
+    for (i=0; i<bufs->num_bufs; i++) {
+        if (MM_CAMERA_OK != pme->cam->ops->qbuf(bufs->camera_handle,
+                                                bufs->ch_id,
+                                                bufs->bufs[i])) {
+            CDBG_ERROR("%s: Failed in Qbuf\n", __func__);
+        }
+    }
+
+    mm_camera_app_done();
+
+    CDBG("%s: END\n", __func__);
+}
+
+static void mm_app_snapshot_notify_cb(mm_camera_super_buf_t *bufs,
+                                      void *user_data)
+{
+
+    int rc = 0;
+    uint32_t i = 0;
+    mm_camera_test_obj_t *pme = (mm_camera_test_obj_t *)user_data;
+    mm_camera_channel_t *channel = NULL;
+    mm_camera_stream_t *p_stream = NULL;
+    mm_camera_stream_t *m_stream = NULL;
+    mm_camera_buf_def_t *p_frame = NULL;
+    mm_camera_buf_def_t *m_frame = NULL;
+
+    CDBG("%s: BEGIN\n", __func__);
+
+    /* find channel */
+    for (i = 0; i < MM_CHANNEL_TYPE_MAX; i++) {
+        if (pme->channels[i].ch_id == bufs->ch_id) {
+            channel = &pme->channels[i];
+            break;
+        }
+    }
+    if (NULL == channel) {
+        CDBG_ERROR("%s: Wrong channel id (%d)", __func__, bufs->ch_id);
+        rc = -1;
+        goto error;
+    }
+
+    /* find snapshot stream */
+    for (i = 0; i < channel->num_streams; i++) {
+        if (channel->streams[i].s_config.stream_info->stream_type == CAM_STREAM_TYPE_SNAPSHOT) {
+            m_stream = &channel->streams[i];
+            break;
+        }
+    }
+    if (NULL == m_stream) {
+        CDBG_ERROR("%s: cannot find snapshot stream", __func__);
+        rc = -1;
+        goto error;
+    }
+
+    /* find snapshot frame */
+    for (i = 0; i < bufs->num_bufs; i++) {
+        if (bufs->bufs[i]->stream_id == m_stream->s_id) {
+            m_frame = bufs->bufs[i];
+            break;
+        }
+    }
+    if (NULL == m_frame) {
+        CDBG_ERROR("%s: main frame is NULL", __func__);
+        rc = -1;
+        goto error;
+    }
+
+    mm_app_dump_frame(m_frame, "main", "yuv", m_frame->frame_idx);
+
+    /* find postview stream */
+    for (i = 0; i < channel->num_streams; i++) {
+        if (channel->streams[i].s_config.stream_info->stream_type == CAM_STREAM_TYPE_POSTVIEW) {
+            p_stream = &channel->streams[i];
+            break;
+        }
+    }
+    if (NULL != p_stream) {
+        /* find preview frame */
+        for (i = 0; i < bufs->num_bufs; i++) {
+            if (bufs->bufs[i]->stream_id == p_stream->s_id) {
+                p_frame = bufs->bufs[i];
+                break;
+            }
+        }
+        if (NULL != p_frame) {
+            mm_app_dump_frame(p_frame, "postview", "yuv", p_frame->frame_idx);
+        }
+    }
+
+    mm_app_cache_ops((mm_camera_app_meminfo_t *)m_frame->mem_info,
+                     ION_IOC_CLEAN_INV_CACHES);
+
+    pme->jpeg_buf.buf.buffer = (uint8_t *)malloc(m_frame->frame_len);
+    if ( NULL == pme->jpeg_buf.buf.buffer ) {
+        CDBG_ERROR("%s: error allocating jpeg output buffer", __func__);
+        goto error;
+    }
+
+    pme->jpeg_buf.buf.frame_len = m_frame->frame_len;
+    /* create a new jpeg encoding session */
+    rc = createEncodingSession(pme, m_stream, m_frame);
+    if (0 != rc) {
+        CDBG_ERROR("%s: error creating jpeg session", __func__);
+        free(pme->jpeg_buf.buf.buffer);
+        goto error;
+    }
+
+    /* start jpeg encoding job */
+    rc = encodeData(pme, bufs, m_stream);
+    if (0 != rc) {
+        CDBG_ERROR("%s: error creating jpeg session", __func__);
+        free(pme->jpeg_buf.buf.buffer);
+        goto error;
+    }
+
+error:
+    /* buf done rcvd frames in error case */
+    if ( 0 != rc ) {
+        for (i=0; i<bufs->num_bufs; i++) {
+            if (MM_CAMERA_OK != pme->cam->ops->qbuf(bufs->camera_handle,
+                                                    bufs->ch_id,
+                                                    bufs->bufs[i])) {
+                CDBG_ERROR("%s: Failed in Qbuf\n", __func__);
+            }
+            mm_app_cache_ops((mm_camera_app_meminfo_t *)bufs->bufs[i]->mem_info,
+                             ION_IOC_INV_CACHES);
+        }
+    }
+
+    CDBG("%s: END\n", __func__);
+}
+
+mm_camera_channel_t * mm_app_add_snapshot_channel(mm_camera_test_obj_t *test_obj)
+{
+    mm_camera_channel_t *channel = NULL;
+    mm_camera_stream_t *stream = NULL;
+
+    channel = mm_app_add_channel(test_obj,
+                                 MM_CHANNEL_TYPE_SNAPSHOT,
+                                 NULL,
+                                 NULL,
+                                 NULL);
+    if (NULL == channel) {
+        CDBG_ERROR("%s: add channel failed", __func__);
+        return NULL;
+    }
+
+    stream = mm_app_add_snapshot_stream(test_obj,
+                                        channel,
+                                        mm_app_snapshot_notify_cb,
+                                        (void *)test_obj,
+                                        1,
+                                        1);
+    if (NULL == stream) {
+        CDBG_ERROR("%s: add snapshot stream failed\n", __func__);
+        mm_app_del_channel(test_obj, channel);
+        return NULL;
+    }
+
+    return channel;
+}
+
+mm_camera_stream_t * mm_app_add_postview_stream(mm_camera_test_obj_t *test_obj,
+                                                mm_camera_channel_t *channel,
+                                                mm_camera_buf_notify_t stream_cb,
+                                                void *userdata,
+                                                uint8_t num_bufs,
+                                                uint8_t num_burst)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_stream_t *stream = NULL;
+    cam_capability_t *cam_cap = (cam_capability_t *)(test_obj->cap_buf.buf.buffer);
+
+    stream = mm_app_add_stream(test_obj, channel);
+    if (NULL == stream) {
+        CDBG_ERROR("%s: add stream failed\n", __func__);
+        return NULL;
+    }
+
+    stream->s_config.mem_vtbl.get_bufs = mm_app_stream_initbuf;
+    stream->s_config.mem_vtbl.put_bufs = mm_app_stream_deinitbuf;
+    stream->s_config.mem_vtbl.clean_invalidate_buf =
+      mm_app_stream_clean_invalidate_buf;
+    stream->s_config.mem_vtbl.invalidate_buf = mm_app_stream_invalidate_buf;
+    stream->s_config.mem_vtbl.user_data = (void *)stream;
+    stream->s_config.stream_cb = stream_cb;
+    stream->s_config.userdata = userdata;
+    stream->num_of_bufs = num_bufs;
+
+    stream->s_config.stream_info = (cam_stream_info_t *)stream->s_info_buf.buf.buffer;
+    memset(stream->s_config.stream_info, 0, sizeof(cam_stream_info_t));
+    stream->s_config.stream_info->stream_type = CAM_STREAM_TYPE_POSTVIEW;
+    if (num_burst == 0) {
+        stream->s_config.stream_info->streaming_mode = CAM_STREAMING_MODE_CONTINUOUS;
+    } else {
+        stream->s_config.stream_info->streaming_mode = CAM_STREAMING_MODE_BURST;
+        stream->s_config.stream_info->num_of_burst = num_burst;
+    }
+    stream->s_config.stream_info->fmt = DEFAULT_PREVIEW_FORMAT;
+    stream->s_config.stream_info->dim.width = DEFAULT_PREVIEW_WIDTH;
+    stream->s_config.stream_info->dim.height = DEFAULT_PREVIEW_HEIGHT;
+    stream->s_config.padding_info = cam_cap->padding_info;
+
+    rc = mm_app_config_stream(test_obj, channel, stream, &stream->s_config);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:config preview stream err=%d\n", __func__, rc);
+        return NULL;
+    }
+
+    return stream;
+}
+
+int mm_app_start_capture_raw(mm_camera_test_obj_t *test_obj, uint8_t num_snapshots)
+{
+    int32_t rc = MM_CAMERA_OK;
+    mm_camera_channel_t *channel = NULL;
+    mm_camera_stream_t *s_main = NULL;
+    mm_camera_channel_attr_t attr;
+
+    memset(&attr, 0, sizeof(mm_camera_channel_attr_t));
+    attr.notify_mode = MM_CAMERA_SUPER_BUF_NOTIFY_BURST;
+    attr.max_unmatched_frames = 3;
+    channel = mm_app_add_channel(test_obj,
+                                 MM_CHANNEL_TYPE_CAPTURE,
+                                 &attr,
+                                 mm_app_snapshot_notify_cb_raw,
+                                 test_obj);
+    if (NULL == channel) {
+        CDBG_ERROR("%s: add channel failed", __func__);
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    test_obj->buffer_format = DEFAULT_RAW_FORMAT;
+    s_main = mm_app_add_raw_stream(test_obj,
+                                   channel,
+                                   mm_app_snapshot_notify_cb_raw,
+                                   test_obj,
+                                   num_snapshots,
+                                   num_snapshots);
+    if (NULL == s_main) {
+        CDBG_ERROR("%s: add main snapshot stream failed\n", __func__);
+        mm_app_del_channel(test_obj, channel);
+        return rc;
+    }
+
+    rc = mm_app_start_channel(test_obj, channel);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:start zsl failed rc=%d\n", __func__, rc);
+        mm_app_del_stream(test_obj, channel, s_main);
+        mm_app_del_channel(test_obj, channel);
+        return rc;
+    }
+
+    return rc;
+}
+
+int mm_app_stop_capture_raw(mm_camera_test_obj_t *test_obj)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_channel_t *ch = NULL;
+    int i;
+
+    ch = mm_app_get_channel_by_type(test_obj, MM_CHANNEL_TYPE_CAPTURE);
+
+    rc = mm_app_stop_channel(test_obj, ch);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:stop recording failed rc=%d\n", __func__, rc);
+    }
+
+    for ( i = 0 ; i < ch->num_streams ; i++ ) {
+        mm_app_del_stream(test_obj, ch, &ch->streams[i]);
+    }
+
+    mm_app_del_channel(test_obj, ch);
+
+    return rc;
+}
+
+int mm_app_start_capture(mm_camera_test_obj_t *test_obj,
+                         uint8_t num_snapshots)
+{
+    int32_t rc = MM_CAMERA_OK;
+    mm_camera_channel_t *channel = NULL;
+    mm_camera_stream_t *s_main = NULL;
+    mm_camera_stream_t *s_metadata = NULL;
+    mm_camera_channel_attr_t attr;
+
+    memset(&attr, 0, sizeof(mm_camera_channel_attr_t));
+    attr.notify_mode = MM_CAMERA_SUPER_BUF_NOTIFY_CONTINUOUS;
+    attr.max_unmatched_frames = 3;
+    channel = mm_app_add_channel(test_obj,
+                                 MM_CHANNEL_TYPE_CAPTURE,
+                                 &attr,
+                                 mm_app_snapshot_notify_cb,
+                                 test_obj);
+    if (NULL == channel) {
+        CDBG_ERROR("%s: add channel failed", __func__);
+        return -MM_CAMERA_E_GENERAL;
+    }
+    s_metadata = mm_app_add_metadata_stream(test_obj,
+                                            channel,
+                                            mm_app_snapshot_metadata_notify_cb,
+                                            (void *)test_obj,
+                                            CAPTURE_BUF_NUM);
+    if (NULL == s_metadata) {
+        CDBG_ERROR("%s: add metadata stream failed\n", __func__);
+        mm_app_del_channel(test_obj, channel);
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    s_main = mm_app_add_snapshot_stream(test_obj,
+                                        channel,
+                                        NULL,
+                                        NULL,
+                                        CAPTURE_BUF_NUM,
+                                        num_snapshots);
+    if (NULL == s_main) {
+        CDBG_ERROR("%s: add main snapshot stream failed\n", __func__);
+        mm_app_del_channel(test_obj, channel);
+        return rc;
+    }
+
+    rc = mm_app_start_channel(test_obj, channel);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:start zsl failed rc=%d\n", __func__, rc);
+        mm_app_del_stream(test_obj, channel, s_main);
+        mm_app_del_stream(test_obj, channel, s_metadata);
+        mm_app_del_channel(test_obj, channel);
+        return rc;
+    }
+
+    return rc;
+}
+
+int mm_app_stop_capture(mm_camera_test_obj_t *test_obj)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_channel_t *ch = NULL;
+
+    ch = mm_app_get_channel_by_type(test_obj, MM_CHANNEL_TYPE_CAPTURE);
+
+    rc = mm_app_stop_and_del_channel(test_obj, ch);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:stop capture channel failed rc=%d\n", __func__, rc);
+    }
+
+    return rc;
+}
+
+int mm_app_take_picture(mm_camera_test_obj_t *test_obj, uint8_t is_burst_mode)
+{
+    CDBG_HIGH("\nEnter %s!!\n",__func__);
+    int rc = MM_CAMERA_OK;
+    uint8_t num_snapshot = 1;
+    int num_rcvd_snapshot = 0;
+
+    if (is_burst_mode)
+       num_snapshot = 6;
+
+    //stop preview before starting capture.
+    rc = mm_app_stop_preview(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: stop preview failed before capture!!, err=%d\n",__func__, rc);
+        return rc;
+    }
+
+    rc = mm_app_start_capture(test_obj, num_snapshot);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: mm_app_start_capture(), err=%d\n", __func__,rc);
+        return rc;
+    }
+    while (num_rcvd_snapshot < num_snapshot) {
+        CDBG_HIGH("\nWaiting mm_camera_app_wait !!\n");
+        mm_camera_app_wait();
+        num_rcvd_snapshot++;
+    }
+    rc = mm_app_stop_capture(test_obj);
+    if (rc != MM_CAMERA_OK) {
+       CDBG_ERROR("%s: mm_app_stop_capture(), err=%d\n",__func__, rc);
+       return rc;
+    }
+    //start preview after capture.
+    rc = mm_app_start_preview(test_obj);
+    if (rc != MM_CAMERA_OK) {
+        CDBG_ERROR("%s: start preview failed after capture!!, err=%d\n",__func__,rc);
+    }
+    return rc;
+}
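The capture path in this file is driven from mm_app_take_picture(): it stops preview, starts the capture channel, blocks on mm_camera_app_wait() until jpeg_encode_cb() signals each encoded frame, then restores preview. A short sketch of how a test might invoke it end-to-end, assuming an mm_camera_app_t initialized the same way the unit tests in mm_qcamera_unit_test.c do (capture_one_jpeg and cam_idx are illustrative names):

    /* Sketch only: open camera cam_idx, take a single JPEG, close it.
     * cam_app is assumed to be an already-initialized mm_camera_app_t. */
    static int capture_one_jpeg(mm_camera_app_t *cam_app, int cam_idx)
    {
        mm_camera_test_obj_t test_obj;
        int rc;

        memset(&test_obj, 0, sizeof(mm_camera_test_obj_t));
        rc = mm_app_open(cam_app, cam_idx, &test_obj);
        if (MM_CAMERA_OK != rc) {
            return rc;
        }
        rc = mm_app_start_preview(&test_obj);
        if (MM_CAMERA_OK == rc) {
            /* second argument: 0 = single shot, non-zero = 6-frame burst */
            rc = mm_app_take_picture(&test_obj, 0);
            /* mm_app_take_picture() restarts preview itself on success */
            mm_app_stop_preview(&test_obj);
        }
        mm_app_close(&test_obj);
        return rc;
    }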
diff --git a/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_socket.c b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_socket.c
new file mode 100644
index 0000000..28b2971
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_socket.c
@@ -0,0 +1,865 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+#include "mm_qcamera_socket.h"
+#include "mm_qcamera_commands.h"
+#include "mm_qcamera_dbg.h"
+
+#define IP_ADDR                  "127.0.0.1"
+#define TUNING_CHROMATIX_PORT     55555
+#define TUNING_PREVIEW_PORT       55556
+
+#define CURRENT_COMMAND_ACK_SUCCESS 1
+#define CURRENT_COMMAND_ACK_FAILURE 2
+
+pthread_t eztune_thread_id;
+
+static ssize_t tuneserver_send_command_rsp(tuningserver_t *tsctrl,
+  char *send_buf, uint32_t send_len)
+{
+  ssize_t rc;
+
+  /* send ack back to client upon req */
+  if (send_len <= 0) {
+    ALOGE("%s:Invalid send len \n", __func__);
+    return -1;
+  }
+  if (send_buf == NULL) {
+    ALOGE("%s:Invalid send buf \n", __func__);
+    return -1;
+  }
+
+  rc = send(tsctrl->clientsocket_id, send_buf, send_len, 0);
+  if (rc < 0) {
+    ALOGE("%s:RSP send returns error %s\n", __func__, strerror(errno));
+  } else {
+    rc = 0;
+  }
+
+  if (send_buf != NULL) {
+    free(send_buf);
+    send_buf = NULL;
+  }
+  return rc;
+}
+
+static void release_eztune_prevcmd_rsp(eztune_prevcmd_rsp *pHead)
+{
+  if (pHead != NULL ) {
+    release_eztune_prevcmd_rsp((eztune_prevcmd_rsp *)pHead->next);
+    free(pHead);
+  }
+}
+
+static ssize_t tuneserver_ack(uint16_t a, uint32_t b, tuningserver_t *tsctrl)
+{
+  ssize_t rc;
+  char ack_1[6];
+  /*Ack the command here*/
+  memcpy(ack_1, &a, 2);
+  memcpy(ack_1+2, &b, 4);
+  /* send echo back to client upon accept */
+  rc = send(tsctrl->clientsocket_id, &ack_1, sizeof(ack_1), 0);
+  if (rc < 0) {
+    ALOGE("%s: eztune_server_run: send returns error %s\n", __func__,
+      strerror(errno));
+    return rc;
+  } else if (rc < (int32_t)sizeof(ack_1)) {
+    /*Shouldn't hit this for packets <1K; need to re-send if we do*/
+  }
+  return 0;
+}
+
+static ssize_t tuneserver_send_command_ack( uint8_t ack,
+    tuningserver_t *tsctrl)
+{
+  ssize_t rc;
+  /* send ack back to client upon req */
+  rc = send(tsctrl->clientsocket_id, &ack, sizeof(ack), 0);
+  if (rc < 0) {
+    ALOGE("%s:ACK send returns error %s\n", __func__, strerror(errno));
+    return rc;
+  }
+  return 0;
+}
+
+/** tuneserver_process_command
+ *    @tsctrl: the server control object
+ *
+ *  Processes the command that the client sent
+ *
+ *  Return: >=0 on success, -1 on failure.
+ **/
+static int32_t tuneserver_process_command(tuningserver_t *tsctrl,
+  char *send_buf, uint32_t send_len)
+{
+  tuneserver_protocol_t *p = tsctrl->proto;
+  int result = 0;
+
+  CDBG("%s: Current command is %d\n", __func__, p->current_cmd);
+  switch (p->current_cmd) {
+  case TUNESERVER_GET_LIST:
+    if(tuneserver_send_command_ack(CURRENT_COMMAND_ACK_SUCCESS, tsctrl)) {
+      ALOGE("%s: Ack Failed for cmd %d\n", __func__, p->current_cmd);
+      return -1;
+    }
+    result = tuneserver_process_get_list_cmd(tsctrl, p->recv_buf,
+      send_buf, send_len);
+    if (result < 0) {
+      ALOGE("%s: RSP processing Failed for cmd %d\n", __func__, p->current_cmd);
+      return -1;
+    }
+    if(tuneserver_send_command_rsp(tsctrl, send_buf, send_len)) {
+      ALOGE("%s: RSP Failed for cmd %d\n", __func__, p->current_cmd);
+      return -1;
+    }
+    break;
+
+  case TUNESERVER_GET_PARMS:
+    if(tuneserver_send_command_ack(CURRENT_COMMAND_ACK_SUCCESS, tsctrl)) {
+      ALOGE("%s: Ack Failed for cmd %d\n", __func__, p->current_cmd);
+      return -1;
+    }
+    result = tuneserver_process_get_params_cmd(tsctrl, p->recv_buf,
+      send_buf, send_len);
+    if (result < 0) {
+      ALOGE("%s: RSP processing Failed for cmd %d\n", __func__, p->current_cmd);
+      return -1;
+    }
+    if(tuneserver_send_command_rsp(tsctrl, send_buf, send_len)) {
+      ALOGE("%s: RSP Failed for cmd %d\n", __func__, p->current_cmd);
+      return -1;
+    }
+    break;
+
+  case TUNESERVER_SET_PARMS:
+    if(tuneserver_send_command_ack(CURRENT_COMMAND_ACK_SUCCESS, tsctrl)) {
+      ALOGE("%s: Ack Failed for cmd %d\n", __func__, p->current_cmd);
+      return -1;
+    }
+    result = tuneserver_process_set_params_cmd(tsctrl, p->recv_buf,
+      send_buf, send_len);
+    if (result < 0) {
+      ALOGE("%s: RSP processing Failed for cmd %d\n", __func__, p->current_cmd);
+      return -1;
+    }
+    if(tuneserver_send_command_rsp(tsctrl, send_buf, send_len)) {
+      ALOGE("%s: RSP Failed for cmd %d\n", __func__, p->current_cmd);
+      return -1;
+    }
+    break;
+
+  case TUNESERVER_MISC_CMDS: {
+    if(tuneserver_send_command_ack(CURRENT_COMMAND_ACK_SUCCESS, tsctrl)) {
+      ALOGE("%s: Ack Failed for cmd %d\n", __func__, p->current_cmd);
+      return -1;
+    }
+    result = tuneserver_process_misc_cmd(tsctrl, p->recv_buf,
+      send_buf, send_len);
+    if (result < 0) {
+      ALOGE("%s: RSP processing Failed for cmd %d\n", __func__, p->current_cmd);
+      return -1;
+    }
+    if(tuneserver_send_command_rsp(tsctrl, send_buf, send_len)) {
+      ALOGE("%s: RSP Failed for cmd %d\n", __func__, p->current_cmd);
+      return -1;
+    }
+    break;
+  }
+
+  default:
+    if(tuneserver_send_command_ack(CURRENT_COMMAND_ACK_SUCCESS, tsctrl)) {
+      ALOGE("%s: Ack Failed for cmd %d\n", __func__, p->current_cmd);
+      return -1;
+    }
+    ALOGE("%s: p->current_cmd: default\n", __func__);
+    result = -1;
+    break;
+  }
+
+  return result;
+}
+
+/** tuneserver_process_client_message
+ *    @recv_buffer: received message from the client
+ *    @tsctrl: the server control object
+ *
+ *  Processes the message from client and prepares for next
+ *  message.
+ *
+ *  Return: >=0 on success, -1 on failure.
+ **/
+static int32_t tuneserver_process_client_message(void *recv_buffer,
+  tuningserver_t *tsctrl)
+{
+  int rc = 0;
+  tuneserver_protocol_t *p = tsctrl->proto;
+
+  switch (tsctrl->proto->next_recv_code) {
+  case TUNESERVER_RECV_COMMAND:
+    p->current_cmd = *(uint16_t *)recv_buffer;
+    p->next_recv_code = TUNESERVER_RECV_PAYLOAD_SIZE;
+    p->next_recv_len = sizeof(uint32_t);
+    break;
+
+  case TUNESERVER_RECV_PAYLOAD_SIZE:
+    p->next_recv_code = TUNESERVER_RECV_PAYLOAD;
+    p->next_recv_len = *(uint32_t *)recv_buffer;
+    p->recv_len = p->next_recv_len;
+    if (p->next_recv_len > TUNESERVER_MAX_RECV)
+      return -1;
+    if (p->next_recv_len == 0) {
+      p->next_recv_code = TUNESERVER_RECV_RESPONSE;
+      p->next_recv_len = sizeof(uint32_t);
+    }
+    break;
+
+  case TUNESERVER_RECV_PAYLOAD:
+    p->recv_buf = malloc(p->next_recv_len);
+    if (!p->recv_buf) {
+      ALOGE("%s:Error allocating memory for recv_buf %s\n", __func__,
+        strerror(errno));
+      return -1;
+    }
+    memcpy(p->recv_buf, recv_buffer, p->next_recv_len);
+    p->next_recv_code = TUNESERVER_RECV_RESPONSE;
+    p->next_recv_len = sizeof(uint32_t);
+    /*Process current command at this point*/
+    break;
+
+  case TUNESERVER_RECV_RESPONSE:
+    p->next_recv_code = TUNESERVER_RECV_COMMAND;
+    p->next_recv_len = 2;
+    p->send_len = *(uint32_t *)recv_buffer;
+    p->send_buf = (char *)calloc(p->send_len, sizeof(char));
+    if (!p->send_buf) {
+      ALOGE("%s:Error allocating memory for send_buf %s\n", __func__,
+        strerror(errno));
+      return -1;
+    }
+    rc = tuneserver_process_command(tsctrl, p->send_buf, p->send_len);
+    free(p->recv_buf);
+    p->recv_buf = NULL;
+    p->recv_len = 0;
+    break;
+
+  default:
+    ALOGE("%s: p->next_recv_code: default\n", __func__);
+    rc = -1;
+    break;
+  }
+
+  return rc;
+}
+
+/** tuneserver_ack_onaccept_initprotocol
+ *    @tsctrl: the server control object
+ *
+ *  Acks a connection from the client and sets up the
+ *  protocol object to start receiving commands.
+ *
+ *  Return: >=0 on success, -1 on failure.
+ **/
+static ssize_t tuneserver_ack_onaccept_initprotocol(tuningserver_t *tsctrl)
+{
+  ssize_t rc = 0;
+  uint32_t ack_status;
+
+  ALOGE("%s starts\n", __func__);
+/*
+  if(tsctrl->camera_running) {
+    ack_status = 1;
+  } else {
+    ack_status = 2;
+  }
+*/
+  ack_status = 1;
+
+  rc = tuneserver_ack(1, ack_status, tsctrl);
+
+  tsctrl->proto = malloc(sizeof(tuneserver_protocol_t));
+  if (!tsctrl->proto) {
+    ALOGE("%s: malloc returns NULL with error %s\n", __func__, strerror(errno));
+    return -1;
+  }
+
+  tsctrl->proto->current_cmd    = 0xFFFF;
+  tsctrl->proto->next_recv_code = TUNESERVER_RECV_COMMAND;
+  tsctrl->proto->next_recv_len  = 2;
+  tsctrl->proto->recv_buf       = NULL;
+  tsctrl->proto->send_buf       = NULL;
+
+  CDBG("%s end\n", __func__);
+
+  return rc;
+}
+
+/** tuneserver_check_status
+ *    @tsctrl: the server control object
+ *
+ *  Checks if camera is running and stops it.
+ *
+ *  Return: >=0 on success, -1 on failure.
+ **/
+#if 0
+static void tuneserver_check_status(tuningserver_t *tsctrl)
+{
+  if (tsctrl->camera_running == 1) {
+    /*TODO: Stop camera here*/
+    tuneserver_stop_cam(&tsctrl->lib_handle);
+  }
+  tsctrl->camera_running = 0;
+
+  tuneserver_close_cam(&tsctrl->lib_handle);
+}
+#endif
+
+static ssize_t prevserver_send_command_rsp(tuningserver_t *tsctrl,
+  char *send_buf, uint32_t send_len)
+{
+  ssize_t rc;
+
+  /* send ack back to client upon req */
+  if (send_len <= 0) {
+    ALOGE("%s:Invalid send len \n", __func__);
+    return -1;
+  }
+  if (send_buf == NULL) {
+    ALOGE("%s:Invalid send buf \n", __func__);
+    return -1;
+  }
+
+  rc = send(tsctrl->pr_clientsocket_id, send_buf, send_len, 0);
+  if (rc < 0) {
+    ALOGE("%s:RSP send returns error %s\n", __func__, strerror(errno));
+  } else {
+    rc = 0;
+  }
+  if (send_buf != NULL) {
+    free(send_buf);
+    send_buf = NULL;
+  }
+  return rc;
+}
+
+static void prevserver_init_protocol(tuningserver_t *tsctrl)
+{
+  tsctrl->pr_proto = malloc(sizeof(prserver_protocol_t));
+  if (!tsctrl->pr_proto) {
+    ALOGE("%s: malloc returns NULL with error %s\n",
+     __func__, strerror(errno));
+    return;
+  }
+
+  tsctrl->pr_proto->current_cmd    = 0xFFFF;
+  tsctrl->pr_proto->next_recv_code = TUNE_PREV_RECV_COMMAND;
+  tsctrl->pr_proto->next_recv_len  = 2;
+}
+
+static int32_t prevserver_process_command(
+  tuningserver_t *tsctrl, char **send_buf, uint32_t *send_len)
+{
+  prserver_protocol_t *p = tsctrl->pr_proto;
+  int result = 0;
+  eztune_prevcmd_rsp *rsp_ptr=NULL, *rspn_ptr=NULL, *head_ptr=NULL;
+
+  CDBG("%s: Current command is %d\n", __func__, p->current_cmd);
+  switch (p->current_cmd) {
+  case TUNE_PREV_GET_INFO:
+    result = tuneserver_preview_getinfo(tsctrl, send_buf, send_len);
+    if (result < 0) {
+      ALOGE("%s: RSP processing Failed for cmd %d\n", __func__,
+        p->current_cmd);
+      return -1;
+    }
+    rsp_ptr = (eztune_prevcmd_rsp *)*send_buf;
+    if ((!rsp_ptr) || (!rsp_ptr->send_buf)) {
+      ALOGE("%s: RSP ptr is NULL %d\n", __func__, p->current_cmd);
+      return -1;
+    }
+    if (prevserver_send_command_rsp(tsctrl,
+      rsp_ptr->send_buf, rsp_ptr->send_len)) {
+      ALOGE("%s: RSP Failed for TUNE_PREV_GET_INFO ver cmd %d\n", __func__,
+        p->current_cmd);
+      return -1;
+    }
+    rspn_ptr = (eztune_prevcmd_rsp *)rsp_ptr->next;
+    if ((!rspn_ptr) || (!rspn_ptr->send_buf)) {
+      ALOGE("%s: RSP1 ptr is NULL %d\n", __func__, p->current_cmd);
+      return -1;
+    }
+    if (prevserver_send_command_rsp(tsctrl,
+        rspn_ptr->send_buf, rspn_ptr->send_len)) {
+      ALOGE("%s: RSP Failed for TUNE_PREV_GET_INFO caps cmd %d\n", __func__,
+        p->current_cmd);
+      return -1;
+    }
+    free(rspn_ptr);
+    free(rsp_ptr);
+    break;
+
+  case TUNE_PREV_CH_CNK_SIZE:
+    result = tuneserver_preview_getchunksize(tsctrl, send_buf, send_len);
+    if (result < 0) {
+      ALOGE("%s: RSP processing Failed for cmd %d\n", __func__, p->current_cmd);
+      return -1;
+    }
+    if (prevserver_send_command_rsp(tsctrl, *send_buf, *send_len)) {
+      ALOGE("%s: RSP Failed for TUNE_PREV_CH_CNK_SIZE cmd %d\n", __func__,
+        p->current_cmd);
+      return -1;
+    }
+    break;
+
+  case TUNE_PREV_GET_PREV_FRAME:
+    result = tuneserver_preview_getframe(tsctrl, send_buf, send_len);
+    if (result < 0) {
+      ALOGE("%s: RSP processing Failed for cmd %d\n", __func__, p->current_cmd);
+      return -1;
+    }
+    rsp_ptr = (eztune_prevcmd_rsp *)*send_buf;
+    if ((!rsp_ptr) || (!rsp_ptr->send_buf)) {
+      ALOGE("%s: RSP ptr is NULL %d\n", __func__, p->current_cmd);
+      return -1;
+    }
+    head_ptr = rsp_ptr;
+
+    while (rsp_ptr != NULL) {
+      if ((!rsp_ptr) || (!rsp_ptr->send_buf)) {
+        ALOGE("%s: RSP ptr is NULL %d\n", __func__, p->current_cmd);
+        return -1;
+      }
+      if (prevserver_send_command_rsp(tsctrl,
+        rsp_ptr->send_buf, rsp_ptr->send_len)) {
+        ALOGE("%s: RSP Failed for TUNE_PREV_GET_INFO ver cmd %d\n", __func__,
+          p->current_cmd);
+        return -1;
+      }
+      rsp_ptr = (eztune_prevcmd_rsp *)rsp_ptr->next;
+    }
+    release_eztune_prevcmd_rsp(head_ptr);
+    break;
+
+  case TUNE_PREV_GET_JPG_SNAP:
+  case TUNE_PREV_GET_RAW_SNAP:
+  case TUNE_PREV_GET_RAW_PREV:
+    result = tuneserver_preview_unsupported(tsctrl, send_buf, send_len);
+    if (result < 0) {
+       ALOGE("%s:RSP processing Failed for cmd %d\n", __func__, p->current_cmd);
+      return -1;
+    }
+    if (prevserver_send_command_rsp(tsctrl, *send_buf, *send_len)) {
+      ALOGE("%s:RSP Failed for UNSUPPORTED cmd %d\n", __func__, p->current_cmd);
+      return -1;
+    }
+    break;
+
+  default:
+    ALOGE("%s: p->current_cmd: default\n", __func__);
+    result = -1;
+    break;
+  }
+
+  return result;
+}
+
+/** previewserver_process_client_message
+ *    @recv_buffer: received message from the client
+ *    @tsctrl: the server control object
+ *
+ *  Processes the message from client and prepares for next
+ *  message.
+ *
+ *  Return: >=0 on success, -1 on failure.
+ **/
+static int32_t prevserver_process_client_message(void *recv_buffer,
+  tuningserver_t *tsctrl)
+{
+  int rc = 0;
+  prserver_protocol_t *p = tsctrl->pr_proto;
+
+  switch (p->next_recv_code) {
+  case TUNE_PREV_RECV_COMMAND:
+    CDBG("%s  %d\n", __func__, __LINE__);
+    p->current_cmd = *(uint16_t *)recv_buffer;
+    if(p->current_cmd != TUNE_PREV_CH_CNK_SIZE) {
+      rc = prevserver_process_command(tsctrl,
+        &p->send_buf, (uint32_t *)&p->send_len);
+      break;
+    }
+    p->next_recv_code = TUNE_PREV_RECV_NEWCNKSIZE;
+    p->next_recv_len = sizeof(uint32_t);
+    CDBG("%s TUNE_PREV_COMMAND X\n", __func__);
+    break;
+  case TUNE_PREV_RECV_NEWCNKSIZE:
+    CDBG("%s  %d\n", __func__, __LINE__);
+    p->new_cnk_size = *(uint32_t *)recv_buffer;
+    p->next_recv_code = TUNE_PREV_RECV_COMMAND;
+    p->next_recv_len  = 2;
+    rc = prevserver_process_command(tsctrl,
+      &p->send_buf, (uint32_t *)&p->send_len);
+    break;
+  default:
+    ALOGE("%s prev_proc->next_recv_code: default\n", __func__);
+    rc = -1;
+    break;
+  }
+
+  return rc;
+}
+
+/** tunning_server_socket_listen
+ *    @ip_addr: the ip addr to listen
+ *    @port: the port to listen
+ *
+ *  Sets up a listen socket for eztune.
+ *
+ *  Return: >0 on success, <=0 on failure.
+ **/
+int tunning_server_socket_listen(const char* ip_addr, uint16_t port)
+{
+  int sock_fd = -1;
+  mm_qcamera_sock_addr_t server_addr;
+  int result;
+  int option;
+  int socket_flag;
+
+  memset(&server_addr, 0, sizeof(server_addr));
+  server_addr.addr_in.sin_family = AF_INET;
+  server_addr.addr_in.sin_port = (__be16) htons(port);
+  server_addr.addr_in.sin_addr.s_addr = inet_addr(ip_addr);
+
+  if (server_addr.addr_in.sin_addr.s_addr == INADDR_NONE) {
+    ALOGE("[ERR] %s invalid address.\n", __func__);
+    return -1;
+  }
+
+  /* Create an AF_INET stream socket to receive incoming connection ON */
+  sock_fd = socket(AF_INET, SOCK_STREAM, 0);
+  if (sock_fd < 0) {
+    ALOGE("[ERR] %s socket failed\n", __func__);
+    return sock_fd;
+  }
+
+  // set the listen socket to non-blocking
+  socket_flag = fcntl(sock_fd, F_GETFL, 0);
+  fcntl(sock_fd, F_SETFL, socket_flag | O_NONBLOCK);
+
+  /* reuse in case it is in timeout */
+  option = 1;
+  result = setsockopt(sock_fd, SOL_SOCKET, SO_REUSEADDR,
+    &option, sizeof(option));
+
+  if (result < 0) {
+    ALOGE("eztune setsockopt failed");
+    close(sock_fd);
+    sock_fd = -1;
+    return sock_fd;
+  }
+
+  result = bind(sock_fd, &server_addr.addr, sizeof(server_addr.addr_in));
+  if (result < 0) {
+    ALOGE("eztune socket bind failed");
+    close(sock_fd);
+    sock_fd = -1;
+    return sock_fd;
+  }
+
+  result = listen(sock_fd, 1);
+  if (result < 0) {
+    ALOGE("eztune socket listen failed");
+    close(sock_fd);
+    sock_fd = -1;
+    return sock_fd;
+  }
+
+  CDBG_HIGH("%s. sock_fd: %d, listen at port: %d\n", __func__, sock_fd, port);
+
+  return sock_fd;
+}
+
+/** eztune_proc
+ *    @data: pointer to the camera library handle
+ *
+ *  Creates the tuning servers and waits for
+ *  connections/messages from a prospective
+ *  client.
+ *
+ **/
+void *eztune_proc(void *data)
+{
+  int server_socket = -1, client_socket = -1;
+  int prev_server_socket = -1, prev_client_socket = -1;
+
+  mm_qcamera_sock_addr_t addr_client_inet;
+  socklen_t addr_client_len = sizeof(addr_client_inet.addr_in);
+  int result;
+  fd_set tsfds;
+  int num_fds = 0;
+  ssize_t recv_bytes;
+  char buf[TUNESERVER_MAX_RECV];
+
+  mm_camera_lib_handle *lib_handle = (mm_camera_lib_handle *)data;
+
+  ALOGE(">>> Starting tune server <<< \n");
+
+  // for eztune chromatix params
+  server_socket = tunning_server_socket_listen(IP_ADDR, TUNING_CHROMATIX_PORT);
+  if (server_socket <= 0) {
+    ALOGE("[ERR] fail to setup listen socket for eztune chromatix parms...");
+    return NULL;
+  }
+  prev_server_socket = tunning_server_socket_listen(IP_ADDR, TUNING_PREVIEW_PORT);
+  if (prev_server_socket <= 0) {
+    ALOGE("[ERR] fail to setup listen socket for eztune preview...\n");
+    return NULL;
+  }
+  num_fds = TUNESERVER_MAX(server_socket, prev_server_socket);
+  CDBG_HIGH("num_fds = %d\n", num_fds);
+
+  do {
+    FD_ZERO(&tsfds);
+    FD_SET(server_socket, &tsfds);
+    FD_SET(prev_server_socket, &tsfds);
+    if (client_socket > 0) {
+      FD_SET(client_socket, &tsfds);
+    }
+    if (prev_client_socket > 0) {
+      FD_SET( prev_client_socket, &tsfds);
+    }
+
+    /* no timeout */
+    result = select(num_fds + 1, &tsfds, NULL, NULL, NULL);
+    if (result < 0) {
+      ALOGE("[ERR] select failed: %s\n", strerror(errno));
+      continue;
+    }
+
+    /*
+     ** (1) CHROMATIX SERVER
+     */
+    if (FD_ISSET(server_socket, &tsfds)) {
+      CDBG("Receiving New client connection\n");
+
+      client_socket = accept(server_socket,
+        &addr_client_inet.addr, &addr_client_len);
+      if (client_socket == -1) {
+        ALOGE("accept failed %s", strerror(errno));
+        continue;
+      }
+
+      ALOGE("accept a new connect on 55555, sd(%d)\n", client_socket);
+      num_fds = TUNESERVER_MAX(num_fds, client_socket);
+
+      // open camera and get handle - this is needed to
+      // be able to set parameters without starting
+      // preview stream
+      /*if (!tsctrl.camera_running) {
+        result = tuneserver_open_cam(&tsctrl.lib_handle, &tsctrl);
+        if(result) {
+          printf("\n Camera Open Fail !!! \n");
+          close(server_socket);
+          return EXIT_FAILURE;
+        }
+      }*/
+      result = tuneserver_open_cam(lib_handle);
+      if(result) {
+        ALOGE("\n Tuning Library open failed!!!\n");
+        close(server_socket);
+        return NULL;
+      }
+      lib_handle->tsctrl.clientsocket_id = client_socket;
+      if (tuneserver_ack_onaccept_initprotocol(&lib_handle->tsctrl) < 0) {
+        ALOGE("%s: Error while acking\n", __func__);
+        close(client_socket);
+        continue;
+      }
+      tuneserver_initialize_tuningp(lib_handle, client_socket,
+        lib_handle->tsctrl.proto->send_buf, lib_handle->tsctrl.proto->send_len);
+    }
+
+    if (FD_ISSET(client_socket, &tsfds)) {
+      if (lib_handle->tsctrl.proto == NULL) {
+        ALOGE("%s: Cannot receive msg without connect\n", __func__);
+        continue;
+      }
+
+      /*Receive message and process it*/
+      recv_bytes = recv(client_socket, (void *)buf,
+        lib_handle->tsctrl.proto->next_recv_len, 0);
+      CDBG("Receive %lld bytes \n", (long long int) recv_bytes);
+
+      if (recv_bytes == -1) {
+        ALOGE("%s: Receive failed with error %s\n", __func__, strerror(errno));
+        //tuneserver_check_status(&tsctrl);
+        continue;
+      } else if (recv_bytes == 0) {
+        ALOGE("%s %d: connection has been terminated\n", __func__, __LINE__);
+
+        tuneserver_deinitialize_tuningp(&lib_handle->tsctrl, client_socket,
+          lib_handle->tsctrl.proto->send_buf,
+          lib_handle->tsctrl.proto->send_len);
+        free(lib_handle->tsctrl.proto);
+        lib_handle->tsctrl.proto = NULL;
+
+        close(client_socket);
+        client_socket = -1;
+        //tuneserver_check_status(&tsctrl);
+      } else {
+        CDBG("%s: Processing socket command\n", __func__);
+
+        result = tuneserver_process_client_message(buf, &lib_handle->tsctrl);
+
+        if (result < 0) {
+          ALOGE("%s %d Protocol violated\n", __func__, __LINE__);
+
+          free(lib_handle->tsctrl.proto);
+          lib_handle->tsctrl.proto = NULL;
+
+          close(client_socket);
+          client_socket = -1;
+          //tuneserver_check_status(&tsctrl);
+          continue;
+        }
+      }
+    }
+
+    /*
+     ** (2) PREVIEW SERVER
+     */
+    if (FD_ISSET(prev_server_socket, &tsfds)) {
+      CDBG("Receiving New Preview client connection\n");
+
+      prev_client_socket = accept(prev_server_socket,
+        &addr_client_inet.addr, &addr_client_len);
+      if (prev_client_socket == -1) {
+        ALOGE("accept failed %s", strerror(errno));
+        continue;
+      }
+
+      lib_handle->tsctrl.pr_clientsocket_id = prev_client_socket;
+
+      CDBG("Accepted a new connection, fd(%d)\n", prev_client_socket);
+      num_fds = TUNESERVER_MAX(num_fds, prev_client_socket);
+
+      // start camera
+      /*if (!tsctrl.camera_running) {
+        result = 0;
+        result = tuneserver_open_cam(&tsctrl.lib_handle, &tsctrl);
+        if(result) {
+          printf("\n Camera Open Fail !!! \n");
+          return EXIT_FAILURE;
+        }
+      }*/
+      cam_dimension_t dim;
+      //dim.width = lib_handle->test_obj.buffer_width;
+      //dim.height = lib_handle->test_obj.buffer_height;
+      dim.width = DEFAULT_PREVIEW_WIDTH;
+      dim.height = DEFAULT_PREVIEW_HEIGHT;
+
+      CDBG("preview dimension info: w(%d), h(%d)\n", dim.width, dim.height);
+      // make sure the camera is running before initializing the connection,
+      // because the frame size is needed to allocate the preview memory.
+      prevserver_init_protocol(&lib_handle->tsctrl);
+
+      result = tuneserver_initialize_prevtuningp(lib_handle, prev_client_socket,
+        dim, (char **)&lib_handle->tsctrl.proto->send_buf,
+        &lib_handle->tsctrl.proto->send_len);
+      if (result < 0) {
+        ALOGE("tuneserver_initialize_prevtuningp error!");
+        close(prev_client_socket);
+        prev_client_socket = -1;
+      }
+    }
+
+    if (FD_ISSET(prev_client_socket, &tsfds)) {
+      recv_bytes = recv(prev_client_socket, (void *)buf,
+        lib_handle->tsctrl.pr_proto->next_recv_len, 0);
+
+      CDBG("%s prev_client_socket=%d\n", __func__, prev_client_socket);
+      CDBG("%s next_recv_len=%d\n", __func__, buf[0]+buf[1]*256);
+
+      if (recv_bytes <= 0) {
+        if (recv_bytes == 0) {
+          ALOGE("client close the connection.\n");
+        } else {
+          ALOGE("receive error: %s\n", strerror(errno));
+        }
+
+        //tuneserver_check_status(&tsctrl);
+        // on a recv error, close the connection, free the proto data,
+        // and wait for a new connection:
+        // close_connection();
+        // stop_camera()
+        // cleanup_proto_data();
+        tuneserver_deinitialize_prevtuningp(&lib_handle->tsctrl,
+          (char **)&lib_handle->tsctrl.proto->send_buf,
+          &lib_handle->tsctrl.proto->send_len);
+        close(prev_client_socket);
+        prev_client_socket = -1;
+      } else {
+        result = prevserver_process_client_message((void *)buf,
+          &lib_handle->tsctrl);
+        if (result < 0) {
+          ALOGE("%s %d Protocol violated\n", __func__, __LINE__);
+
+          //free(tsctrl->preivew_proto);
+          //free(tsctrl);
+          //max_fd = ezt_parms_listen_sd + 1;
+          tuneserver_deinitialize_prevtuningp(&lib_handle->tsctrl,
+            (char **)&lib_handle->tsctrl.proto->send_buf,
+            &lib_handle->tsctrl.proto->send_len);
+          close(prev_client_socket);
+          prev_client_socket = -1;
+          //tuneserver_check_status(&tsctrl);
+        }
+        //sleep(1);
+      }
+    }
+  } while (1);
+
+  if (server_socket >= 0) {
+    close(server_socket);
+  }
+  if (client_socket >= 0) {
+    close(client_socket);
+  }
+  if (prev_server_socket >= 0) {
+    close(prev_server_socket);
+  }
+  if (prev_client_socket >= 0) {
+    close(prev_client_socket);
+  }
+
+  return NULL;
+}
+
+int eztune_server_start (void *lib_handle)
+{
+  return pthread_create(&eztune_thread_id, NULL,  eztune_proc, lib_handle);
+}
+
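On the chromatix port the server consumes each request as a 2-byte command, a 4-byte payload length, the payload itself (skipped when the length is zero), and finally a 4-byte response-buffer length, exactly as tuneserver_process_client_message() steps through above. A sketch of how a client could frame one such request over an already-connected socket (the command id and payload contents are illustrative, not values from the eztune protocol headers):

    /* Sketch only: client-side framing of one tuning request, matching the
     * server's receive state machine. Uses send() from <sys/socket.h>. */
    static int send_tuning_request(int sock, uint16_t cmd,
                                   const void *payload, uint32_t payload_len,
                                   uint32_t rsp_len)
    {
        if (send(sock, &cmd, sizeof(cmd), 0) != (ssize_t)sizeof(cmd))
            return -1;                                /* 2-byte command */
        if (send(sock, &payload_len, sizeof(payload_len), 0) !=
            (ssize_t)sizeof(payload_len))
            return -1;                                /* 4-byte payload size */
        if (payload_len > 0 &&
            send(sock, payload, payload_len, 0) != (ssize_t)payload_len)
            return -1;                                /* payload bytes */
        if (send(sock, &rsp_len, sizeof(rsp_len), 0) != (ssize_t)sizeof(rsp_len))
            return -1;                                /* 4-byte response size */
        return 0;
    }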
diff --git a/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_unit_test.c b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_unit_test.c
new file mode 100644
index 0000000..51f4f5d
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_unit_test.c
@@ -0,0 +1,694 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "mm_qcamera_dbg.h"
+#include "mm_qcamera_app.h"
+
+#define MM_QCAMERA_APP_UTEST_MAX_MAIN_LOOP 1
+#define MM_QCAMERA_APP_UTEST_OUTER_LOOP 1
+#define MM_QCAMERA_APP_UTEST_INNER_LOOP 1
+#define MM_QCAM_APP_TEST_NUM 128
+
+static mm_app_tc_t mm_app_tc[MM_QCAM_APP_TEST_NUM];
+
+int mm_app_tc_open_close(mm_camera_app_t *cam_app)
+{
+    int rc = MM_CAMERA_OK;
+    int i;
+    mm_camera_test_obj_t test_obj;
+
+    printf("\n Verifying open/close cameras...\n");
+    for (i = 0; i < cam_app->num_cameras; i++) {
+        memset(&test_obj, 0, sizeof(mm_camera_test_obj_t));
+        rc = mm_app_open(cam_app, i, &test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_open() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            break;
+        }
+        sleep(1);
+        rc = mm_app_close(&test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_close() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            break;
+        }
+    }
+    if (rc == MM_CAMERA_OK) {
+        printf("\nPassed\n");
+    } else {
+        printf("\nFailed\n");
+    }
+    CDBG("%s:END, rc = %d\n", __func__, rc);
+    return rc;
+}
+
+int mm_app_tc_start_stop_preview(mm_camera_app_t *cam_app)
+{
+    int rc = MM_CAMERA_OK;
+    int i, j;
+    mm_camera_test_obj_t test_obj;
+
+    printf("\n Verifying start/stop preview...\n");
+    for (i = 0; i < cam_app->num_cameras; i++) {
+        memset(&test_obj, 0, sizeof(mm_camera_test_obj_t));
+        rc = mm_app_open(cam_app, i, &test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_open() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            break;
+        }
+
+        for (j = 0; j < MM_QCAMERA_APP_UTEST_INNER_LOOP; j++) {
+            rc = mm_app_start_preview(&test_obj);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_app_start_preview() cam_idx=%d, err=%d\n",
+                           __func__, i, rc);
+                break;
+            }
+            sleep(1);
+            rc = mm_app_stop_preview(&test_obj);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_app_stop_preview() cam_idx=%d, err=%d\n",
+                           __func__, i, rc);
+                break;
+            }
+        }
+
+        rc |= mm_app_close(&test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_close() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            break;
+        }
+    }
+    if (rc == MM_CAMERA_OK) {
+        printf("\nPassed\n");
+    } else {
+        printf("\nFailed\n");
+    }
+    CDBG("%s:END, rc = %d\n", __func__, rc);
+    return rc;
+}
+
+int mm_app_tc_start_stop_zsl(mm_camera_app_t *cam_app)
+{
+    int rc = MM_CAMERA_OK;
+    int i, j;
+    mm_camera_test_obj_t test_obj;
+
+    printf("\n Verifying start/stop preview...\n");
+    for (i = 0; i < cam_app->num_cameras; i++) {
+        memset(&test_obj, 0, sizeof(mm_camera_test_obj_t));
+        rc = mm_app_open(cam_app, i, &test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_open() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            break;
+        }
+
+        for (j = 0; j < 1; j++) {
+            rc = mm_app_start_preview_zsl(&test_obj);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_app_start_preview_zsl() cam_idx=%d, err=%d\n",
+                           __func__, i, rc);
+                break;
+            }
+            sleep(1);
+            rc = mm_app_stop_preview_zsl(&test_obj);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_app_stop_preview_zsl() cam_idx=%d, err=%d\n",
+                           __func__, i, rc);
+                break;
+            }
+        }
+
+        rc = mm_app_close(&test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_close() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            break;
+        }
+    }
+    if (rc == MM_CAMERA_OK) {
+        printf("\nPassed\n");
+    } else {
+        printf("\nFailed\n");
+    }
+    CDBG("%s:END, rc = %d\n", __func__, rc);
+    return rc;
+}
+
+int mm_app_tc_start_stop_video_preview(mm_camera_app_t *cam_app)
+{
+    int rc = MM_CAMERA_OK;
+    int i, j;
+    mm_camera_test_obj_t test_obj;
+
+    printf("\n Verifying start/stop video preview...\n");
+    for (i = 0; i < cam_app->num_cameras; i++) {
+        memset(&test_obj, 0, sizeof(mm_camera_test_obj_t));
+        rc = mm_app_open(cam_app, i, &test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_open() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            break;
+        }
+
+        for (j = 0; j < MM_QCAMERA_APP_UTEST_INNER_LOOP; j++) {
+            rc = mm_app_start_record_preview(&test_obj);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_start_record_preview() cam_idx=%d, err=%d\n",
+                           __func__, i, rc);
+                break;
+            }
+            sleep(1);
+            rc = mm_app_stop_record_preview(&test_obj);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_stop_record_preview() cam_idx=%d, err=%d\n",
+                           __func__, i, rc);
+                break;
+            }
+        }
+
+        rc = mm_app_close(&test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_close() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            break;
+        }
+    }
+    if (rc == MM_CAMERA_OK) {
+        printf("\nPassed\n");
+    } else {
+        printf("\nFailed\n");
+    }
+    CDBG("%s:END, rc = %d\n", __func__, rc);
+    return rc;
+}
+
+int mm_app_tc_start_stop_video_record(mm_camera_app_t *cam_app)
+{
+    int rc = MM_CAMERA_OK;
+    int i, j;
+    mm_camera_test_obj_t test_obj;
+
+    printf("\n Verifying start/stop recording...\n");
+    for (i = 0; i < cam_app->num_cameras; i++) {
+        memset(&test_obj, 0, sizeof(mm_camera_test_obj_t));
+        rc = mm_app_open(cam_app, i, &test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_open() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            break;
+        }
+
+        rc = mm_app_start_record_preview(&test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_start_record_preview() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            mm_app_close(&test_obj);
+            break;
+        }
+
+        sleep(1);
+
+        for (j = 0; j < MM_QCAMERA_APP_UTEST_INNER_LOOP; j++) {
+            rc = mm_app_start_record(&test_obj);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_start_record() cam_idx=%d, err=%d\n",
+                           __func__, i, rc);
+                break;
+            }
+
+            sleep(1);
+
+            rc = mm_app_stop_record(&test_obj);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_stop_record() cam_idx=%d, err=%d\n",
+                           __func__, i, rc);
+                break;
+            }
+        }
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:start/stop record cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            mm_app_stop_record_preview(&test_obj);
+            mm_app_close(&test_obj);
+            break;
+        }
+
+        rc = mm_app_stop_record_preview(&test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_stop_record_preview() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            mm_app_close(&test_obj);
+            break;
+        }
+
+        rc = mm_app_close(&test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_close() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            break;
+        }
+    }
+    if (rc == MM_CAMERA_OK) {
+        printf("\nPassed\n");
+    } else {
+        printf("\nFailed\n");
+    }
+    CDBG("%s:END, rc = %d\n", __func__, rc);
+    return rc;
+}
+
+int mm_app_tc_start_stop_live_snapshot(mm_camera_app_t *cam_app)
+{
+    int rc = MM_CAMERA_OK;
+    int i, j;
+    mm_camera_test_obj_t test_obj;
+
+    printf("\n Verifying start/stop live snapshot...\n");
+    for (i = 0; i < cam_app->num_cameras; i++) {
+        memset(&test_obj, 0, sizeof(mm_camera_test_obj_t));
+        rc = mm_app_open(cam_app, i, &test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_open() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            break;
+        }
+
+        rc = mm_app_start_record_preview(&test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_start_record_preview() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            mm_app_close(&test_obj);
+            break;
+        }
+
+        sleep(1);
+
+        rc = mm_app_start_record(&test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_start_record() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            mm_app_stop_record_preview(&test_obj);
+            mm_app_close(&test_obj);
+            break;
+        }
+
+        sleep(1);
+
+        for (j = 0; j < MM_QCAMERA_APP_UTEST_INNER_LOOP; j++) {
+            rc = mm_app_start_live_snapshot(&test_obj);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_start_live_snapshot() cam_idx=%d, err=%d\n",
+                           __func__, i, rc);
+                break;
+            }
+
+            /* wait until the JPEG encoding is done */
+            mm_camera_app_wait();
+
+            rc = mm_app_stop_live_snapshot(&test_obj);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s:mm_app_stop_live_snapshot() cam_idx=%d, err=%d\n",
+                           __func__, i, rc);
+                break;
+            }
+        }
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:start/stop live snapshot cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            mm_app_stop_record(&test_obj);
+            mm_app_stop_record_preview(&test_obj);
+            mm_app_close(&test_obj);
+            break;
+        }
+
+        rc = mm_app_stop_record(&test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_stop_record() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            mm_app_stop_record_preview(&test_obj);
+            mm_app_close(&test_obj);
+            break;
+        }
+
+        sleep(1);
+
+        rc = mm_app_stop_record_preview(&test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_stop_record_preview() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            mm_app_close(&test_obj);
+            break;
+        }
+
+        rc = mm_app_close(&test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_close() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            break;
+        }
+    }
+    if (rc == MM_CAMERA_OK) {
+        printf("\nPassed\n");
+    } else {
+        printf("\nFailed\n");
+    }
+    CDBG("%s:END, rc = %d\n", __func__, rc);
+    return rc;
+}
+
+int mm_app_tc_capture_raw(mm_camera_app_t *cam_app)
+{
+    int rc = MM_CAMERA_OK;
+    int i, j;
+    mm_camera_test_obj_t test_obj;
+    uint8_t num_snapshot = 1;
+    uint8_t num_rcvd_snapshot = 0;
+
+    printf("\n Verifying raw capture...\n");
+    for (i = 0; i < cam_app->num_cameras; i++) {
+        memset(&test_obj, 0, sizeof(mm_camera_test_obj_t));
+        rc = mm_app_open(cam_app, i, &test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_open() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            break;
+        }
+
+        for (j = 0; j < MM_QCAMERA_APP_UTEST_INNER_LOOP; j++) {
+            rc = mm_app_start_capture_raw(&test_obj, num_snapshot);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_app_start_capture() cam_idx=%d, err=%d\n",
+                           __func__, i, rc);
+                break;
+            }
+            while (num_rcvd_snapshot < num_snapshot) {
+                mm_camera_app_wait();
+                num_rcvd_snapshot++;
+            }
+            rc = mm_app_stop_capture_raw(&test_obj);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_app_stop_capture() cam_idx=%d, err=%d\n",
+                           __func__, i, rc);
+                break;
+            }
+        }
+
+        rc |= mm_app_close(&test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_close() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            break;
+        }
+    }
+    if (rc == MM_CAMERA_OK) {
+        printf("\nPassed\n");
+    } else {
+        printf("\nFailed\n");
+    }
+    CDBG("%s:END, rc = %d\n", __func__, rc);
+    return rc;
+}
+
+int mm_app_tc_capture_regular(mm_camera_app_t *cam_app)
+{
+    int rc = MM_CAMERA_OK;
+    int i, j;
+    mm_camera_test_obj_t test_obj;
+    uint8_t num_snapshot = 1;
+    uint8_t num_rcvd_snapshot = 0;
+
+    printf("\n Verifying capture...\n");
+    for (i = 0; i < cam_app->num_cameras; i++) {
+        memset(&test_obj, 0, sizeof(mm_camera_test_obj_t));
+        rc = mm_app_open(cam_app, i, &test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_open() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            break;
+        }
+
+        for (j = 0; j < MM_QCAMERA_APP_UTEST_INNER_LOOP; j++) {
+            rc = mm_app_start_capture(&test_obj, num_snapshot);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_app_start_capture() cam_idx=%d, err=%d\n",
+                           __func__, i, rc);
+                break;
+            }
+            while (num_rcvd_snapshot < num_snapshot) {
+                mm_camera_app_wait();
+                num_rcvd_snapshot++;
+            }
+            rc = mm_app_stop_capture(&test_obj);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_app_stop_capture() cam_idx=%d, err=%d\n",
+                           __func__, i, rc);
+                break;
+            }
+        }
+
+        rc = mm_app_close(&test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_close() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            break;
+        }
+    }
+    if (rc == MM_CAMERA_OK) {
+        printf("\nPassed\n");
+    } else {
+        printf("\nFailed\n");
+    }
+    CDBG("%s:END, rc = %d\n", __func__, rc);
+    return rc;
+}
+
+int mm_app_tc_capture_burst(mm_camera_app_t *cam_app)
+{
+    int rc = MM_CAMERA_OK;
+    int i, j;
+    mm_camera_test_obj_t test_obj;
+    uint8_t num_snapshot = 3;
+    uint8_t num_rcvd_snapshot = 0;
+
+    printf("\n Verifying capture...\n");
+    for (i = 0; i < cam_app->num_cameras; i++) {
+        memset(&test_obj, 0, sizeof(mm_camera_test_obj_t));
+        rc = mm_app_open(cam_app, i, &test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_open() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            break;
+        }
+
+        for (j = 0; j < MM_QCAMERA_APP_UTEST_INNER_LOOP; j++) {
+            rc = mm_app_start_capture(&test_obj, num_snapshot);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_app_start_capture() cam_idx=%d, err=%d\n",
+                           __func__, i, rc);
+                break;
+            }
+            while (num_rcvd_snapshot < num_snapshot) {
+                mm_camera_app_wait();
+                num_rcvd_snapshot++;
+            }
+            rc = mm_app_stop_capture(&test_obj);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_app_stop_capture() cam_idx=%d, err=%d\n",
+                           __func__, i, rc);
+                break;
+            }
+        }
+
+        rc = mm_app_close(&test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_close() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            break;
+        }
+    }
+    if (rc == MM_CAMERA_OK) {
+        printf("\nPassed\n");
+    } else {
+        printf("\nFailed\n");
+    }
+    CDBG("%s:END, rc = %d\n", __func__, rc);
+    return rc;
+}
+
+int mm_app_tc_rdi_burst(mm_camera_app_t *cam_app)
+{
+    int rc = MM_CAMERA_OK, rc2 = MM_CAMERA_OK;
+    int i, j;
+    mm_camera_test_obj_t test_obj;
+
+    printf("\n Verifying rdi burst (3) capture...\n");
+    for (i = 0; i < cam_app->num_cameras; i++) {
+        memset(&test_obj, 0, sizeof(mm_camera_test_obj_t));
+        rc = mm_app_open(cam_app, i, &test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_open() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            break;
+        }
+
+        for (j = 0; j < MM_QCAMERA_APP_UTEST_INNER_LOOP; j++) {
+            rc = mm_app_start_rdi(&test_obj, 3);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_app_start_preview() cam_idx=%d, err=%d\n",
+                           __func__, i, rc);
+                break;
+            }
+            sleep(1);
+            rc = mm_app_stop_rdi(&test_obj);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_app_stop_preview() cam_idx=%d, err=%d\n",
+                           __func__, i, rc);
+                break;
+            }
+        }
+
+        rc2 = mm_app_close(&test_obj);
+        if (rc2 != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_close() cam_idx=%d, err=%d\n",
+                       __func__, i, rc2);
+            if (rc == MM_CAMERA_OK) {
+                rc = rc2;
+            }
+            break;
+        }
+    }
+    if (rc == MM_CAMERA_OK) {
+        printf("\nPassed\n");
+    } else {
+        printf("\nFailed\n");
+    }
+    CDBG("%s:END, rc = %d\n", __func__, rc);
+    return rc;
+}
+
+int mm_app_tc_rdi_cont(mm_camera_app_t *cam_app)
+{
+    int rc = MM_CAMERA_OK, rc2 = MM_CAMERA_OK;
+    int i, j;
+    mm_camera_test_obj_t test_obj;
+
+    printf("\n Verifying rdi continuous capture...\n");
+    for (i = 0; i < cam_app->num_cameras; i++) {
+        memset(&test_obj, 0, sizeof(mm_camera_test_obj_t));
+        rc = mm_app_open(cam_app, i, &test_obj);
+        if (rc != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_open() cam_idx=%d, err=%d\n",
+                       __func__, i, rc);
+            break;
+        }
+
+        for (j = 0; j < MM_QCAMERA_APP_UTEST_INNER_LOOP; j++) {
+            rc = mm_app_start_rdi(&test_obj, 0);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_app_start_preview() cam_idx=%d, err=%d\n",
+                           __func__, i, rc);
+                break;
+            }
+            sleep(1);
+            rc = mm_app_stop_rdi(&test_obj);
+            if (rc != MM_CAMERA_OK) {
+                CDBG_ERROR("%s: mm_app_stop_preview() cam_idx=%d, err=%d\n",
+                           __func__, i, rc);
+                break;
+            }
+        }
+
+        rc2 = mm_app_close(&test_obj);
+        if (rc2 != MM_CAMERA_OK) {
+            CDBG_ERROR("%s:mm_app_close() cam_idx=%d, err=%d\n",
+                       __func__, i, rc2);
+            if (rc == MM_CAMERA_OK) {
+                rc = rc2;
+            }
+            break;
+        }
+    }
+    if (rc == MM_CAMERA_OK) {
+        printf("\nPassed\n");
+    } else {
+        printf("\nFailed\n");
+    }
+    CDBG("%s:END, rc = %d\n", __func__, rc);
+    return rc;
+}
+
+int mm_app_gen_test_cases()
+{
+    int tc = 0;
+    memset(mm_app_tc, 0, sizeof(mm_app_tc));
+    if (tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_tc_open_close;
+    if (tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_tc_start_stop_preview;
+    //if (tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_tc_start_stop_zsl;
+    //if (tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_tc_start_stop_video_preview;
+    //if (tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_tc_start_stop_video_record;
+    //if (tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_tc_start_stop_live_snapshot;
+    //if (tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_tc_capture_regular;
+    //if (tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_tc_capture_burst;
+    //if (tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_tc_rdi_cont;
+    //if (tc < MM_QCAM_APP_TEST_NUM) mm_app_tc[tc++].f = mm_app_tc_rdi_burst;
+
+    return tc;
+}
+
+int mm_app_unit_test_entry(mm_camera_app_t *cam_app)
+{
+    int rc = MM_CAMERA_OK;
+    int i, j, tc = 0;
+
+    tc = mm_app_gen_test_cases();
+    CDBG("Running %d test cases\n",tc);
+    for (i = 0; i < tc; i++) {
+        for (j = 0; j < MM_QCAMERA_APP_UTEST_OUTER_LOOP; j++) {
+            mm_app_tc[i].r = mm_app_tc[i].f(cam_app);
+            if (mm_app_tc[i].r != MM_CAMERA_OK) {
+                printf("%s: test case %d (iteration %d) error = %d, abort unit testing engine!!!!\n",
+                       __func__, i, j, mm_app_tc[i].r);
+                rc = mm_app_tc[i].r;
+                goto end;
+            }
+        }
+    }
+end:
+    printf("nTOTAL_TSET_CASE = %d, NUM_TEST_RAN = %d, rc=%d\n", tc, i, rc);
+    return rc;
+}
+
+
+
+
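For reference, a minimal illustrative sketch of driving the unit test harness above. How the mm_camera_app_t is populated (camera count, HAL handles and so on) is handled elsewhere in the test framework and is assumed here; the only call taken from this file is mm_app_unit_test_entry().

    /* Hypothetical driver sketch. The caller is assumed to have already
     * initialized `cam_app` through the rest of the mm-camera-test app. */
    #include "mm_qcamera_app.h"

    int example_run_unit_tests(mm_camera_app_t *cam_app)
    {
        /* Runs every registered test case MM_QCAMERA_APP_UTEST_OUTER_LOOP
         * times and stops at the first failure. */
        return mm_app_unit_test_entry(cam_app);
    }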
diff --git a/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_video.c b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_video.c
new file mode 100644
index 0000000..338105e
--- /dev/null
+++ b/camera/QCamera2/stack/mm-camera-test/src/mm_qcamera_video.c
@@ -0,0 +1,256 @@
+/*
+Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+    * Neither the name of The Linux Foundation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "mm_qcamera_dbg.h"
+#include "mm_qcamera_app.h"
+
+static void mm_app_video_notify_cb(mm_camera_super_buf_t *bufs,
+                                   void *user_data)
+{
+    char file_name[64];
+    mm_camera_buf_def_t *frame = bufs->bufs[0];
+    mm_camera_test_obj_t *pme = (mm_camera_test_obj_t *)user_data;
+
+    CDBG("%s: BEGIN - length=%zu, frame idx = %d\n",
+         __func__, frame->frame_len, frame->frame_idx);
+    snprintf(file_name, sizeof(file_name), "V_C%d", pme->cam->camera_handle);
+    mm_app_dump_frame(frame, file_name, "yuv", frame->frame_idx);
+
+    if (MM_CAMERA_OK != pme->cam->ops->qbuf(bufs->camera_handle,
+                                            bufs->ch_id,
+                                            frame)) {
+        CDBG_ERROR("%s: Failed in Preview Qbuf\n", __func__);
+    }
+    mm_app_cache_ops((mm_camera_app_meminfo_t *)frame->mem_info,
+                     ION_IOC_INV_CACHES);
+
+    CDBG("%s: END\n", __func__);
+}
+
+mm_camera_stream_t * mm_app_add_video_stream(mm_camera_test_obj_t *test_obj,
+                                             mm_camera_channel_t *channel,
+                                             mm_camera_buf_notify_t stream_cb,
+                                             void *userdata,
+                                             uint8_t num_bufs)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_stream_t *stream = NULL;
+    cam_capability_t *cam_cap = (cam_capability_t *)(test_obj->cap_buf.buf.buffer);
+
+    stream = mm_app_add_stream(test_obj, channel);
+    if (NULL == stream) {
+        CDBG_ERROR("%s: add stream failed\n", __func__);
+        return NULL;
+    }
+
+    stream->s_config.mem_vtbl.get_bufs = mm_app_stream_initbuf;
+    stream->s_config.mem_vtbl.put_bufs = mm_app_stream_deinitbuf;
+    stream->s_config.mem_vtbl.clean_invalidate_buf =
+      mm_app_stream_clean_invalidate_buf;
+    stream->s_config.mem_vtbl.invalidate_buf = mm_app_stream_invalidate_buf;
+    stream->s_config.mem_vtbl.user_data = (void *)stream;
+    stream->s_config.stream_cb = stream_cb;
+    stream->s_config.userdata = userdata;
+    stream->num_of_bufs = num_bufs;
+
+    stream->s_config.stream_info = (cam_stream_info_t *)stream->s_info_buf.buf.buffer;
+    memset(stream->s_config.stream_info, 0, sizeof(cam_stream_info_t));
+    stream->s_config.stream_info->stream_type = CAM_STREAM_TYPE_VIDEO;
+    stream->s_config.stream_info->streaming_mode = CAM_STREAMING_MODE_CONTINUOUS;
+    stream->s_config.stream_info->fmt = DEFAULT_VIDEO_FORMAT;
+    stream->s_config.stream_info->dim.width = DEFAULT_VIDEO_WIDTH;
+    stream->s_config.stream_info->dim.height = DEFAULT_VIDEO_HEIGHT;
+    stream->s_config.padding_info = cam_cap->padding_info;
+
+    rc = mm_app_config_stream(test_obj, channel, stream, &stream->s_config);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:config preview stream err=%d\n", __func__, rc);
+        return NULL;
+    }
+
+    return stream;
+}
+
+mm_camera_channel_t * mm_app_add_video_channel(mm_camera_test_obj_t *test_obj)
+{
+    mm_camera_channel_t *channel = NULL;
+    mm_camera_stream_t *stream = NULL;
+
+    channel = mm_app_add_channel(test_obj,
+                                 MM_CHANNEL_TYPE_VIDEO,
+                                 NULL,
+                                 NULL,
+                                 NULL);
+    if (NULL == channel) {
+        CDBG_ERROR("%s: add channel failed", __func__);
+        return NULL;
+    }
+
+    stream = mm_app_add_video_stream(test_obj,
+                                     channel,
+                                     mm_app_video_notify_cb,
+                                     (void *)test_obj,
+                                     1);
+    if (NULL == stream) {
+        CDBG_ERROR("%s: add video stream failed\n", __func__);
+        mm_app_del_channel(test_obj, channel);
+        return NULL;
+    }
+
+    return channel;
+}
+
+int mm_app_start_record_preview(mm_camera_test_obj_t *test_obj)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_channel_t *p_ch = NULL;
+    mm_camera_channel_t *v_ch = NULL;
+    mm_camera_channel_t *s_ch = NULL;
+
+    p_ch = mm_app_add_preview_channel(test_obj);
+    if (NULL == p_ch) {
+        CDBG_ERROR("%s: add preview channel failed", __func__);
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    v_ch = mm_app_add_video_channel(test_obj);
+    if (NULL == v_ch) {
+        CDBG_ERROR("%s: add video channel failed", __func__);
+        mm_app_del_channel(test_obj, p_ch);
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    s_ch = mm_app_add_snapshot_channel(test_obj);
+    if (NULL == s_ch) {
+        CDBG_ERROR("%s: add snapshot channel failed", __func__);
+        mm_app_del_channel(test_obj, p_ch);
+        mm_app_del_channel(test_obj, v_ch);
+        return -MM_CAMERA_E_GENERAL;
+    }
+
+    rc = mm_app_start_channel(test_obj, p_ch);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:start preview failed rc=%d\n", __func__, rc);
+        mm_app_del_channel(test_obj, p_ch);
+        mm_app_del_channel(test_obj, v_ch);
+        mm_app_del_channel(test_obj, s_ch);
+        return rc;
+    }
+
+    return rc;
+}
+
+int mm_app_stop_record_preview(mm_camera_test_obj_t *test_obj)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_channel_t *p_ch = NULL;
+    mm_camera_channel_t *v_ch = NULL;
+    mm_camera_channel_t *s_ch = NULL;
+
+    p_ch = mm_app_get_channel_by_type(test_obj, MM_CHANNEL_TYPE_PREVIEW);
+    v_ch = mm_app_get_channel_by_type(test_obj, MM_CHANNEL_TYPE_VIDEO);
+    s_ch = mm_app_get_channel_by_type(test_obj, MM_CHANNEL_TYPE_SNAPSHOT);
+
+    rc = mm_app_stop_and_del_channel(test_obj, p_ch);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:Stop Preview failed rc=%d\n", __func__, rc);
+    }
+
+    rc = mm_app_stop_and_del_channel(test_obj, v_ch);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:Stop Preview failed rc=%d\n", __func__, rc);
+    }
+
+    rc = mm_app_stop_and_del_channel(test_obj, s_ch);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:Stop Preview failed rc=%d\n", __func__, rc);
+    }
+
+    return rc;
+}
+
+int mm_app_start_record(mm_camera_test_obj_t *test_obj)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_channel_t *v_ch = NULL;
+
+    v_ch = mm_app_get_channel_by_type(test_obj, MM_CHANNEL_TYPE_VIDEO);
+
+    rc = mm_app_start_channel(test_obj, v_ch);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:start recording failed rc=%d\n", __func__, rc);
+    }
+
+    return rc;
+}
+
+int mm_app_stop_record(mm_camera_test_obj_t *test_obj)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_channel_t *v_ch = NULL;
+
+    v_ch = mm_app_get_channel_by_type(test_obj, MM_CHANNEL_TYPE_VIDEO);
+
+    rc = mm_app_stop_channel(test_obj, v_ch);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:stop recording failed rc=%d\n", __func__, rc);
+    }
+
+    return rc;
+}
+
+int mm_app_start_live_snapshot(mm_camera_test_obj_t *test_obj)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_channel_t *s_ch = NULL;
+
+    s_ch = mm_app_get_channel_by_type(test_obj, MM_CHANNEL_TYPE_SNAPSHOT);
+
+    rc = mm_app_start_channel(test_obj, s_ch);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:start recording failed rc=%d\n", __func__, rc);
+    }
+
+    return rc;
+}
+
+int mm_app_stop_live_snapshot(mm_camera_test_obj_t *test_obj)
+{
+    int rc = MM_CAMERA_OK;
+    mm_camera_channel_t *s_ch = NULL;
+
+    s_ch = mm_app_get_channel_by_type(test_obj, MM_CHANNEL_TYPE_SNAPSHOT);
+
+    rc = mm_app_stop_channel(test_obj, s_ch);
+    if (MM_CAMERA_OK != rc) {
+        CDBG_ERROR("%s:stop recording failed rc=%d\n", __func__, rc);
+    }
+
+    return rc;
+}
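For reference, a minimal illustrative sketch of the full recording flow built from the functions added in this file, mirroring mm_app_tc_start_stop_live_snapshot in the unit test file. mm_camera_app_wait() comes from the test framework header; error handling is elided for brevity.

    /* Illustrative recording-plus-live-snapshot flow (sketch only). */
    static int example_record_with_live_snapshot(mm_camera_test_obj_t *test_obj)
    {
        mm_app_start_record_preview(test_obj); /* adds preview/video/snapshot channels, starts preview */
        mm_app_start_record(test_obj);         /* start the video channel */
        mm_app_start_live_snapshot(test_obj);  /* start the snapshot channel */
        mm_camera_app_wait();                  /* wait for the JPEG callback */
        mm_app_stop_live_snapshot(test_obj);
        mm_app_stop_record(test_obj);
        return mm_app_stop_record_preview(test_obj);
    }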
diff --git a/camera/QCamera2/stack/mm-jpeg-interface/Android.mk b/camera/QCamera2/stack/mm-jpeg-interface/Android.mk
new file mode 100644
index 0000000..90527f5
--- /dev/null
+++ b/camera/QCamera2/stack/mm-jpeg-interface/Android.mk
@@ -0,0 +1,67 @@
+OLD_LOCAL_PATH := $(LOCAL_PATH)
+LOCAL_PATH := $(call my-dir)
+
+include $(LOCAL_PATH)/../../../common.mk
+include $(CLEAR_VARS)
+
+# Too many clang warnings/errors, see b/23163853.
+LOCAL_CLANG := false
+
+LOCAL_32_BIT_ONLY := $(BOARD_QTI_CAMERA_32BIT_ONLY)
+LOCAL_CFLAGS+= -D_ANDROID_
+
+LOCAL_CFLAGS += -Wall -Wextra -Werror -Wno-unused-parameter
+
+LOCAL_C_INCLUDES+= $(kernel_includes)
+LOCAL_ADDITIONAL_DEPENDENCIES := $(common_deps)
+
+LOCAL_C_INCLUDES += \
+    frameworks/native/include/media/openmax \
+    $(LOCAL_PATH)/inc \
+    $(LOCAL_PATH)/../common \
+    $(LOCAL_PATH)/../../../ \
+    $(LOCAL_PATH)/../../../mm-image-codec/qexif \
+    $(LOCAL_PATH)/../../../mm-image-codec/qomx_core
+
+ifeq ($(strip $(TARGET_USES_ION)),true)
+    LOCAL_CFLAGS += -DUSE_ION
+endif
+
+ifneq (,$(filter  msm8610,$(TARGET_BOARD_PLATFORM)))
+    LOCAL_CFLAGS+= -DLOAD_ADSP_RPC_LIB
+endif
+
+DUAL_JPEG_TARGET_LIST := msm8974
+DUAL_JPEG_TARGET_LIST += msm8994
+
+ifneq (,$(filter  $(DUAL_JPEG_TARGET_LIST),$(TARGET_BOARD_PLATFORM)))
+    LOCAL_CFLAGS+= -DMM_JPEG_CONCURRENT_SESSIONS_COUNT=2
+else
+    LOCAL_CFLAGS+= -DMM_JPEG_CONCURRENT_SESSIONS_COUNT=1
+endif
+
+JPEG_PIPELINE_TARGET_LIST := msm8994
+JPEG_PIPELINE_TARGET_LIST += msm8992
+
+ifneq (,$(filter  $(JPEG_PIPELINE_TARGET_LIST),$(TARGET_BOARD_PLATFORM)))
+    LOCAL_CFLAGS+= -DMM_JPEG_USE_PIPELINE
+endif
+
+LOCAL_SRC_FILES := \
+    src/mm_jpeg_queue.c \
+    src/mm_jpeg_exif.c \
+    src/mm_jpeg.c \
+    src/mm_jpeg_interface.c \
+    src/mm_jpeg_ionbuf.c \
+    src/mm_jpegdec_interface.c \
+    src/mm_jpegdec.c
+
+LOCAL_MODULE           := libmmjpeg_interface
+LOCAL_PRELINK_MODULE   := false
+LOCAL_SHARED_LIBRARIES := libdl libcutils liblog libqomx_core
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_32_BIT_ONLY := $(BOARD_QTI_CAMERA_32BIT_ONLY)
+include $(BUILD_SHARED_LIBRARY)
+
+LOCAL_PATH := $(OLD_LOCAL_PATH)
diff --git a/camera/QCamera2/stack/mm-jpeg-interface/inc/mm_jpeg.h b/camera/QCamera2/stack/mm-jpeg-interface/inc/mm_jpeg.h
new file mode 100644
index 0000000..97419b0
--- /dev/null
+++ b/camera/QCamera2/stack/mm-jpeg-interface/inc/mm_jpeg.h
@@ -0,0 +1,509 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef MM_JPEG_H_
+#define MM_JPEG_H_
+
+#include <cam_semaphore.h>
+#include "mm_jpeg_interface.h"
+#include "cam_list.h"
+#include "OMX_Types.h"
+#include "OMX_Index.h"
+#include "OMX_Core.h"
+#include "OMX_Component.h"
+#include "QOMX_JpegExtensions.h"
+#include "mm_jpeg_ionbuf.h"
+
+#define MM_JPEG_MAX_THREADS 30
+#define MM_JPEG_CIRQ_SIZE 30
+#define MM_JPEG_MAX_SESSION 10
+#define MAX_EXIF_TABLE_ENTRIES 50
+#define MAX_JPEG_SIZE 20000000
+#define MAX_OMX_HANDLES (5)
+#define ASPECT_TOLERANCE 0.001
+
+
+/** mm_jpeg_abort_state_t:
+ *  @MM_JPEG_ABORT_NONE: Abort is not issued
+ *  @MM_JPEG_ABORT_INIT: Abort is issued from the client
+ *  @MM_JPEG_ABORT_DONE: Abort is completed
+ *
+ *  State representing the abort state
+ **/
+typedef enum {
+  MM_JPEG_ABORT_NONE,
+  MM_JPEG_ABORT_INIT,
+  MM_JPEG_ABORT_DONE,
+} mm_jpeg_abort_state_t;
+
+
+/* define the max number of concurrent jpeg encode jobs
+ * supported by the OMX engine */
+#define NUM_MAX_JPEG_CNCURRENT_JOBS 2
+
+#define JOB_ID_MAGICVAL 0x1
+#define JOB_HIST_MAX 10000
+
+/** DUMP_TO_FILE:
+ *  @filename: file name
+ *  @p_addr: address of the buffer
+ *  @len: buffer length
+ *
+ *  dump the image to the file
+ **/
+#define DUMP_TO_FILE(filename, p_addr, len) ({ \
+  size_t rc = 0; \
+  FILE *fp = fopen(filename, "w+"); \
+  if (fp) { \
+    rc = fwrite(p_addr, 1, len, fp); \
+    CDBG_ERROR("%s:%d] written size %zu", __func__, __LINE__, len); \
+    fclose(fp); \
+  } else { \
+    CDBG_ERROR("%s:%d] open %s failed", __func__, __LINE__, filename); \
+  } \
+})
+
+/** DUMP_TO_FILE2:
+ *  @filename: file name
+ *  @p_addr: address of the buffer
+ *  @len: buffer length
+ *
+ *  dump the image to the file if the memory is non-contiguous
+ **/
+#define DUMP_TO_FILE2(filename, p_addr1, len1, p_addr2, len2) ({ \
+  size_t rc = 0; \
+  FILE *fp = fopen(filename, "w+"); \
+  if (fp) { \
+    rc = fwrite(p_addr1, 1, len1, fp); \
+    rc = fwrite(p_addr2, 1, len2, fp); \
+    CDBG_ERROR("%s:%d] written %zu %zu", __func__, __LINE__, len1, len2); \
+    fclose(fp); \
+  } else { \
+    CDBG_ERROR("%s:%d] open %s failed", __func__, __LINE__, filename); \
+  } \
+})
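For reference, a minimal illustrative use of the dump macros above. The output path and buffer are placeholders; DUMP_TO_FILE2 works the same way but concatenates two non-contiguous regions into one file.

    /* Illustrative dump of an encoded JPEG buffer (sketch only). */
    static void example_dump(uint8_t *jpeg_buf, size_t jpeg_len)
    {
        /* Writes the whole buffer to a single placeholder file. */
        DUMP_TO_FILE("/data/misc/camera/example.jpg", jpeg_buf, jpeg_len);
    }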
+
+/** MM_JPEG_CHK_ABORT:
+ *  @p: client pointer
+ *  @ret: return value
+ *  @label: label to jump to
+ *
+ *  check the abort failure
+ **/
+#define MM_JPEG_CHK_ABORT(p, ret, label) ({ \
+  if (MM_JPEG_ABORT_INIT == p->abort_state) { \
+    CDBG_ERROR("%s:%d] jpeg abort", __func__, __LINE__); \
+    ret = OMX_ErrorNone; \
+    goto label; \
+  } \
+})
+
+#define GET_CLIENT_IDX(x) ((x) & 0xff)
+#define GET_SESSION_IDX(x) (((x) >> 8) & 0xff)
+#define GET_JOB_IDX(x) (((x) >> 16) & 0xff)
+
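For reference, a minimal illustrative decoding of a packed job id using the macros above: the client index occupies bits 0-7, the session index bits 8-15 and the job index bits 16-23.

    /* Illustrative job-id decomposition (sketch only). */
    static void example_decode_job_id(uint32_t job_id)
    {
        int client_idx  = GET_CLIENT_IDX(job_id);
        int session_idx = GET_SESSION_IDX(job_id);
        int job_idx     = GET_JOB_IDX(job_id);
        (void)client_idx; (void)session_idx; (void)job_idx;
    }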
+typedef struct {
+  union {
+    int i_data[MM_JPEG_CIRQ_SIZE];
+    void *p_data[MM_JPEG_CIRQ_SIZE];
+  };
+  int front;
+  int rear;
+  int count;
+  pthread_mutex_t lock;
+} mm_jpeg_cirq_t;
+
+/** cirq_reset:
+ *
+ *  Arguments:
+ *    @q: circular queue
+ *
+ *  Return:
+ *       none
+ *
+ *  Description:
+ *       Resets the circular queue
+ *
+ **/
+static inline void cirq_reset(mm_jpeg_cirq_t *q)
+{
+  q->front = 0;
+  q->rear = 0;
+  q->count = 0;
+  pthread_mutex_init(&q->lock, NULL);
+}
+
+/** cirq_empty:
+ *
+ *  Arguments:
+ *    @q: circular queue
+ *
+ *  Return:
+ *       none
+ *
+ *  Description:
+ *       check if the circular queue is empty
+ *
+ **/
+#define cirq_empty(q) (q->count == 0)
+
+/** cirq_full:
+ *
+ *  Arguments:
+ *    @q: circular queue
+ *
+ *  Return:
+ *       none
+ *
+ *  Description:
+ *       check if the circular queue is full
+ *
+ **/
+#define cirq_full(q) (q->count == MM_JPEG_CIRQ_SIZE)
+
+/** cirq_enqueue:
+ *
+ *  Arguments:
+ *    @q: circular queue
+ *    @data: data to be inserted
+ *
+ *  Return:
+ *       true/false
+ *
+ *  Description:
+ *       enqueue an element into circular queue
+ *
+ **/
+#define cirq_enqueue(q, type, data) ({ \
+  int rc = 0; \
+  pthread_mutex_lock(&q->lock); \
+  if (cirq_full(q)) { \
+    rc = -1; \
+  } else { \
+    q->type[q->rear] = data; \
+    q->rear = (q->rear + 1) % MM_JPEG_CIRQ_SIZE; \
+    q->count++; \
+  } \
+  pthread_mutex_unlock(&q->lock); \
+  rc; \
+})
+
+/** cirq_dequeue:
+ *
+ *  Arguments:
+ *    @q: circular queue
+ *    @data: data to be popped
+ *
+ *  Return:
+ *       true/false
+ *
+ *  Description:
+ *       dequeue an element from the circular queue
+ *
+ **/
+#define cirq_dequeue(q, type, data) ({ \
+  int rc = 0; \
+  pthread_mutex_lock(&q->lock); \
+  if (cirq_empty(q)) { \
+    rc = -1; \
+  } else { \
+    data = q->type[q->front]; \
+    q->front = (q->front + 1) % MM_JPEG_CIRQ_SIZE; \
+    q->count--; \
+  } \
+  pthread_mutex_unlock(&q->lock); \
+  rc; \
+})
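For reference, a minimal illustrative use of the circular queue above. The queue stores either ints (i_data) or pointers (p_data); the second macro argument selects which member is used, and the macros rely on the GCC statement-expression extension already used by this codebase.

    /* Illustrative enqueue/dequeue round trip (sketch only). */
    static void example_cirq(void)
    {
        mm_jpeg_cirq_t cq;
        mm_jpeg_cirq_t *q = &cq;
        int value = 0;

        cirq_reset(q);                           /* empty the queue, init the lock */
        if (cirq_enqueue(q, i_data, 42) == 0) {
            if (cirq_dequeue(q, i_data, value) == 0) {
                /* value is now 42 */
            }
        }
    }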
+
+
+typedef union {
+  uint32_t u32;
+  void* p;
+} mm_jpeg_q_data_t;
+
+typedef struct {
+  struct cam_list list;
+  mm_jpeg_q_data_t data;
+} mm_jpeg_q_node_t;
+
+typedef struct {
+  mm_jpeg_q_node_t head; /* dummy head */
+  uint32_t size;
+  pthread_mutex_t lock;
+} mm_jpeg_queue_t;
+
+typedef enum {
+  MM_JPEG_CMD_TYPE_JOB,          /* job cmd */
+  MM_JPEG_CMD_TYPE_EXIT,         /* EXIT cmd for exiting jobMgr thread */
+  MM_JPEG_CMD_TYPE_DECODE_JOB,
+  MM_JPEG_CMD_TYPE_MAX
+} mm_jpeg_cmd_type_t;
+
+typedef struct mm_jpeg_job_session {
+  uint32_t client_hdl;           /* client handler */
+  uint32_t jobId;                /* job ID */
+  uint32_t sessionId;            /* session ID */
+  mm_jpeg_encode_params_t params; /* encode params */
+  mm_jpeg_decode_params_t dec_params; /* decode params */
+  mm_jpeg_encode_job_t encode_job;             /* job description */
+  mm_jpeg_decode_job_t decode_job;
+  pthread_t encode_pid;          /* encode thread handler*/
+
+  void *jpeg_obj;                /* ptr to mm_jpeg_obj */
+  jpeg_job_status_t job_status;  /* job status */
+
+  int state_change_pending;      /* flag to indicate if state change is pending */
+  OMX_ERRORTYPE error_flag;      /* variable to indicate error during encoding */
+  mm_jpeg_abort_state_t abort_state; /* variable to indicate abort during encoding */
+
+  /* OMX related */
+  OMX_HANDLETYPE omx_handle;                      /* handle to omx engine */
+  OMX_CALLBACKTYPE omx_callbacks;                 /* callbacks to omx engine */
+
+  /* buffer headers */
+  OMX_BUFFERHEADERTYPE *p_in_omx_buf[MM_JPEG_MAX_BUF];
+  OMX_BUFFERHEADERTYPE *p_in_omx_thumb_buf[MM_JPEG_MAX_BUF];
+  OMX_BUFFERHEADERTYPE *p_out_omx_buf[MM_JPEG_MAX_BUF];
+
+  OMX_PARAM_PORTDEFINITIONTYPE inputPort;
+  OMX_PARAM_PORTDEFINITIONTYPE outputPort;
+  OMX_PARAM_PORTDEFINITIONTYPE inputTmbPort;
+
+  /* event locks */
+  pthread_mutex_t lock;
+  pthread_cond_t cond;
+
+  QEXIF_INFO_DATA exif_info_local[MAX_EXIF_TABLE_ENTRIES];  //all exif tags for JPEG encoder
+  int exif_count_local;
+
+  mm_jpeg_cirq_t cb_q;
+  int32_t ebd_count;
+  int32_t fbd_count;
+
+  /* this flag represents whether the job is active */
+  OMX_BOOL active;
+
+  /* this flag indicates if the configuration is complete */
+  OMX_BOOL config;
+
+  /* job history count to generate unique id */
+  unsigned int job_hist;
+
+  OMX_BOOL encoding;
+
+  buffer_t work_buffer;
+
+  OMX_EVENTTYPE omxEvent;
+  int event_pending;
+
+  uint8_t *meta_enc_key;
+  size_t meta_enc_keylen;
+
+  struct mm_jpeg_job_session *next_session;
+
+  uint32_t curr_out_buf_idx;
+
+  uint32_t num_omx_sessions;
+  OMX_BOOL auto_out_buf;
+
+  mm_jpeg_queue_t *session_handle_q;
+  mm_jpeg_queue_t *out_buf_q;
+
+  int thumb_from_main;
+  uint32_t job_index;
+} mm_jpeg_job_session_t;
+
+typedef struct {
+  mm_jpeg_encode_job_t encode_job;
+  uint32_t job_id;
+  uint32_t client_handle;
+} mm_jpeg_encode_job_info_t;
+
+typedef struct {
+  mm_jpeg_decode_job_t decode_job;
+  uint32_t job_id;
+  uint32_t client_handle;
+} mm_jpeg_decode_job_info_t;
+
+typedef struct {
+  mm_jpeg_cmd_type_t type;
+  union {
+    mm_jpeg_encode_job_info_t enc_info;
+    mm_jpeg_decode_job_info_t dec_info;
+  };
+} mm_jpeg_job_q_node_t;
+
+typedef struct {
+  uint8_t is_used;                /* flag: if is a valid client */
+  uint32_t client_handle;         /* client handle */
+  mm_jpeg_job_session_t session[MM_JPEG_MAX_SESSION];
+  pthread_mutex_t lock;           /* job lock */
+} mm_jpeg_client_t;
+
+typedef struct {
+  pthread_t pid;                  /* job cmd thread ID */
+  cam_semaphore_t job_sem;        /* semaphore for job cmd thread */
+  mm_jpeg_queue_t job_queue;      /* queue for job to do */
+} mm_jpeg_job_cmd_thread_t;
+
+#define MAX_JPEG_CLIENT_NUM 8
+typedef struct mm_jpeg_obj_t {
+  /* ClientMgr */
+  int num_clients;                                /* num of clients */
+  mm_jpeg_client_t clnt_mgr[MAX_JPEG_CLIENT_NUM]; /* client manager */
+
+  /* JobMgr */
+  pthread_mutex_t job_lock;                       /* job lock */
+  mm_jpeg_job_cmd_thread_t job_mgr;               /* job mgr thread including todo_q*/
+  mm_jpeg_queue_t ongoing_job_q;                  /* queue for ongoing jobs */
+  buffer_t ionBuffer[MM_JPEG_CONCURRENT_SESSIONS_COUNT];
+
+
+  /* Max pic dimension for work buf calc*/
+  uint32_t max_pic_w;
+  uint32_t max_pic_h;
+#ifdef LOAD_ADSP_RPC_LIB
+  void *adsprpc_lib_handle;
+#endif
+
+  uint32_t work_buf_cnt;
+
+  uint32_t num_sessions;
+
+} mm_jpeg_obj;
+
+/** mm_jpeg_transition_func_t:
+ *
+ * Intermediate function for OMX state transitions
+ **/
+typedef OMX_ERRORTYPE (*mm_jpeg_transition_func_t)(void *);
+
+extern int32_t mm_jpeg_init(mm_jpeg_obj *my_obj);
+extern int32_t mm_jpeg_deinit(mm_jpeg_obj *my_obj);
+extern uint32_t mm_jpeg_new_client(mm_jpeg_obj *my_obj);
+extern int32_t mm_jpeg_start_job(mm_jpeg_obj *my_obj,
+  mm_jpeg_job_t* job,
+  uint32_t* jobId);
+extern int32_t mm_jpeg_abort_job(mm_jpeg_obj *my_obj,
+  uint32_t jobId);
+extern int32_t mm_jpeg_close(mm_jpeg_obj *my_obj,
+  uint32_t client_hdl);
+extern int32_t mm_jpeg_create_session(mm_jpeg_obj *my_obj,
+  uint32_t client_hdl,
+  mm_jpeg_encode_params_t *p_params,
+  uint32_t* p_session_id);
+extern int32_t mm_jpeg_destroy_session_by_id(mm_jpeg_obj *my_obj,
+  uint32_t session_id);
+
+extern int32_t mm_jpegdec_init(mm_jpeg_obj *my_obj);
+extern int32_t mm_jpegdec_deinit(mm_jpeg_obj *my_obj);
+extern int32_t mm_jpeg_jobmgr_thread_release(mm_jpeg_obj * my_obj);
+extern int32_t mm_jpeg_jobmgr_thread_launch(mm_jpeg_obj *my_obj);
+extern int32_t mm_jpegdec_start_decode_job(mm_jpeg_obj *my_obj,
+  mm_jpeg_job_t* job,
+  uint32_t* jobId);
+
+extern int32_t mm_jpegdec_create_session(mm_jpeg_obj *my_obj,
+  uint32_t client_hdl,
+  mm_jpeg_decode_params_t *p_params,
+  uint32_t* p_session_id);
+
+extern int32_t mm_jpegdec_destroy_session_by_id(mm_jpeg_obj *my_obj,
+  uint32_t session_id);
+
+extern int32_t mm_jpegdec_abort_job(mm_jpeg_obj *my_obj,
+  uint32_t jobId);
+
+int32_t mm_jpegdec_process_decoding_job(mm_jpeg_obj *my_obj,
+    mm_jpeg_job_q_node_t* job_node);
+
+/* utility functions declared in mm-camera-interface2.c
+ * that need to be used by mm-camera and below */
+uint32_t mm_jpeg_util_generate_handler(uint8_t index);
+uint8_t mm_jpeg_util_get_index_by_handler(uint32_t handler);
+
+/* basic queue functions */
+extern int32_t mm_jpeg_queue_init(mm_jpeg_queue_t* queue);
+extern int32_t mm_jpeg_queue_enq(mm_jpeg_queue_t* queue,
+    mm_jpeg_q_data_t data);
+extern int32_t mm_jpeg_queue_enq_head(mm_jpeg_queue_t* queue,
+    mm_jpeg_q_data_t data);
+extern mm_jpeg_q_data_t mm_jpeg_queue_deq(mm_jpeg_queue_t* queue);
+extern int32_t mm_jpeg_queue_deinit(mm_jpeg_queue_t* queue);
+extern int32_t mm_jpeg_queue_flush(mm_jpeg_queue_t* queue);
+extern uint32_t mm_jpeg_queue_get_size(mm_jpeg_queue_t* queue);
+extern mm_jpeg_q_data_t mm_jpeg_queue_peek(mm_jpeg_queue_t* queue);
+extern int32_t addExifEntry(QOMX_EXIF_INFO *p_exif_info, exif_tag_id_t tagid,
+  exif_tag_type_t type, uint32_t count, void *data);
+extern int32_t releaseExifEntry(QEXIF_INFO_DATA *p_exif_data);
+extern int process_meta_data(metadata_buffer_t *p_meta,
+  QOMX_EXIF_INFO *exif_info, mm_jpeg_exif_params_t *p_cam3a_params,
+  cam_hal_version_t hal_version);
+
+OMX_ERRORTYPE mm_jpeg_session_change_state(mm_jpeg_job_session_t* p_session,
+  OMX_STATETYPE new_state,
+  mm_jpeg_transition_func_t p_exec);
+
+int map_jpeg_format(mm_jpeg_color_format color_fmt);
+
+OMX_BOOL mm_jpeg_session_abort(mm_jpeg_job_session_t *p_session);
+/**
+ *
+ * special queue functions for job queue
+ **/
+mm_jpeg_job_q_node_t* mm_jpeg_queue_remove_job_by_client_id(
+  mm_jpeg_queue_t* queue, uint32_t client_hdl);
+mm_jpeg_job_q_node_t* mm_jpeg_queue_remove_job_by_job_id(
+  mm_jpeg_queue_t* queue, uint32_t job_id);
+mm_jpeg_job_q_node_t* mm_jpeg_queue_remove_job_by_session_id(
+  mm_jpeg_queue_t* queue, uint32_t session_id);
+mm_jpeg_job_q_node_t* mm_jpeg_queue_remove_job_unlk(
+  mm_jpeg_queue_t* queue, uint32_t job_id);
+
+
+/** mm_jpeg_queue_func_t:
+ *
+ * Intermediate function for queue operation
+ **/
+typedef void (*mm_jpeg_queue_func_t)(void *);
+
+/** mm_jpeg_exif_flash_mode:
+ *
+ * Exif flash mode values
+ **/
+typedef enum {
+  MM_JPEG_EXIF_FLASH_MODE_ON   = 0x1,
+  MM_JPEG_EXIF_FLASH_MODE_OFF  = 0x2,
+  MM_JPEG_EXIF_FLASH_MODE_AUTO = 0x3,
+  MM_JPEG_EXIF_FLASH_MODE_MAX
+} mm_jpeg_exif_flash_mode;
+
+#endif /* MM_JPEG_H_ */
+
+
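For reference, a minimal illustrative use of addExifEntry() declared above. The tag id EXIFTAGID_MAKE and the type EXIF_ASCII are assumed to come from the qexif headers bundled with this interface; verify the exact spellings there before reuse. Releasing the entries later via releaseExifEntry() is omitted because the layout of QOMX_EXIF_INFO is not shown in this header.

    /* Illustrative EXIF tag insertion (sketch only; tag/type names are
     * assumptions from the qexif headers). For ASCII tags the count
     * includes the terminating NUL. */
    static void example_exif(QOMX_EXIF_INFO *p_exif_info)
    {
        char make[] = "ExampleVendor";

        addExifEntry(p_exif_info, EXIFTAGID_MAKE, EXIF_ASCII,
            (uint32_t)sizeof(make), make);
    }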
diff --git a/camera/QCamera2/stack/mm-jpeg-interface/inc/mm_jpeg_dbg.h b/camera/QCamera2/stack/mm-jpeg-interface/inc/mm_jpeg_dbg.h
new file mode 100644
index 0000000..dd6dc1d
--- /dev/null
+++ b/camera/QCamera2/stack/mm-jpeg-interface/inc/mm_jpeg_dbg.h
@@ -0,0 +1,77 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MM_JPEG_DBG_H__
+#define __MM_JPEG_DBG_H__
+
+#define LOG_DEBUG 1
+#define MINIMUM_JPEG_LOG_LEVEL 1
+
+/* Choose debug log level. This will not affect the error logs
+   0: turns off CDBG and CDBG_HIGH logs
+   1: turns-on CDBG_HIGH logs
+   2: turns-on CDBG_HIGH and CDBG logs */
+extern volatile uint32_t gMmJpegIntfLogLevel;
+
+#ifndef LOG_DEBUG
+  #ifdef _ANDROID_
+    #undef LOG_NIDEBUG
+    #undef LOG_TAG
+    #define LOG_NIDEBUG 0
+    #define LOG_TAG "mm-jpeg-intf"
+    #include <utils/Log.h>
+  #else
+    #include <stdio.h>
+    #define ALOGE CDBG
+  #endif
+  #undef CDBG
+  #define CDBG(fmt, args...) do{}while(0)
+#else
+  #ifdef _ANDROID_
+    #undef LOG_NIDEBUG
+    #undef LOG_TAG
+    #define LOG_NIDEBUG 0
+    #define LOG_TAG "mm-jpeg-intf"
+    #include <utils/Log.h>
+    #define CDBG(fmt, args...) ALOGD_IF(gMmJpegIntfLogLevel >= 2, fmt, ##args)
+  #else
+    #include <stdio.h>
+    #define CDBG(fmt, args...) fprintf(stderr, fmt, ##args)
+    #define ALOGE(fmt, args...) fprintf(stderr, fmt, ##args)
+  #endif
+#endif
+
+#ifdef _ANDROID_
+  #define CDBG_HIGH(fmt, args...)   ALOGD_IF(gMmJpegIntfLogLevel >= 1, fmt, ##args)
+  #define CDBG_ERROR(fmt, args...)  ALOGE(fmt, ##args)
+#else
+  #define CDBG_HIGH(fmt, args...) fprintf(stderr, fmt, ##args)
+  #define CDBG_ERROR(fmt, args...) fprintf(stderr, fmt, ##args)
+#endif
+#endif /* __MM_JPEG_DBG_H__ */
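For reference, a minimal illustrative use of the logging macros above. Per the comment on gMmJpegIntfLogLevel, at level 0 only CDBG_ERROR prints, level 1 adds CDBG_HIGH, and level 2 adds CDBG as well.

    /* Illustrative logging at the three severities (sketch only). */
    static void example_logging(int rc)
    {
        CDBG("%s: verbose trace, rc=%d", __func__, rc);        /* level >= 2 */
        CDBG_HIGH("%s: important event, rc=%d", __func__, rc); /* level >= 1 */
        if (rc != 0) {
            CDBG_ERROR("%s: failure, rc=%d", __func__, rc);    /* always on */
        }
    }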
diff --git a/camera/QCamera2/stack/mm-jpeg-interface/inc/mm_jpeg_inlines.h b/camera/QCamera2/stack/mm-jpeg-interface/inc/mm_jpeg_inlines.h
new file mode 100644
index 0000000..39fec8d
--- /dev/null
+++ b/camera/QCamera2/stack/mm-jpeg-interface/inc/mm_jpeg_inlines.h
@@ -0,0 +1,126 @@
+/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef MM_JPEG_INLINES_H_
+#define MM_JPEG_INLINES_H_
+
+#include "mm_jpeg.h"
+
+/** mm_jpeg_get_session:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *    @job_id: job id
+ *
+ *  Return:
+ *       pointer to the job session, or NULL for an invalid job id
+ *
+ *  Description:
+ *       Get the job session by job id
+ *
+ **/
+static inline mm_jpeg_job_session_t *mm_jpeg_get_session(mm_jpeg_obj *my_obj, uint32_t job_id)
+{
+  mm_jpeg_job_session_t *p_session = NULL;
+  int client_idx =  GET_CLIENT_IDX(job_id);
+  int session_idx= GET_SESSION_IDX(job_id);
+
+  CDBG("%s:%d] client_idx %d session_idx %d", __func__, __LINE__,
+    client_idx, session_idx);
+  if ((session_idx >= MM_JPEG_MAX_SESSION) ||
+    (client_idx >= MAX_JPEG_CLIENT_NUM)) {
+    CDBG_ERROR("%s:%d] invalid job id %x", __func__, __LINE__,
+      job_id);
+    return NULL;
+  }
+  pthread_mutex_lock(&my_obj->clnt_mgr[client_idx].lock);
+  p_session = &my_obj->clnt_mgr[client_idx].session[session_idx];
+  pthread_mutex_unlock(&my_obj->clnt_mgr[client_idx].lock);
+  return p_session;
+}
+
+/** mm_jpeg_get_new_session_idx:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *    @client_idx: client index
+ *    @pp_session: output pointer to the reserved job session
+ *
+ *  Return:
+ *       session index, or -1 if no free session is available
+ *
+ *  Description:
+ *       Reserve a free session slot for the given client
+ *
+ **/
+static inline int mm_jpeg_get_new_session_idx(mm_jpeg_obj *my_obj, int client_idx,
+  mm_jpeg_job_session_t **pp_session)
+{
+  int i = 0;
+  int index = -1;
+  for (i = 0; i < MM_JPEG_MAX_SESSION; i++) {
+    pthread_mutex_lock(&my_obj->clnt_mgr[client_idx].lock);
+    if (!my_obj->clnt_mgr[client_idx].session[i].active) {
+      *pp_session = &my_obj->clnt_mgr[client_idx].session[i];
+      my_obj->clnt_mgr[client_idx].session[i].active = OMX_TRUE;
+      index = i;
+      pthread_mutex_unlock(&my_obj->clnt_mgr[client_idx].lock);
+      break;
+    }
+    pthread_mutex_unlock(&my_obj->clnt_mgr[client_idx].lock);
+  }
+  return index;
+}
+
+/** mm_jpeg_remove_session_idx:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *    @job_id: job id
+ *
+ *  Return:
+ *       none
+ *
+ *  Description:
+ *       Mark the session referenced by the job id as inactive
+ *
+ **/
+static inline void mm_jpeg_remove_session_idx(mm_jpeg_obj *my_obj, uint32_t job_id)
+{
+  int client_idx =  GET_CLIENT_IDX(job_id);
+  int session_idx= GET_SESSION_IDX(job_id);
+  CDBG("%s:%d] client_idx %d session_idx %d", __func__, __LINE__,
+    client_idx, session_idx);
+  pthread_mutex_lock(&my_obj->clnt_mgr[client_idx].lock);
+  my_obj->clnt_mgr[client_idx].session[session_idx].active = OMX_FALSE;
+  pthread_mutex_unlock(&my_obj->clnt_mgr[client_idx].lock);
+}
+
+
+
+#endif /* MM_JPEG_INLINES_H_ */
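For reference, a minimal illustrative pairing of the session helpers above. The job id composition shown here simply mirrors the GET_CLIENT_IDX/GET_SESSION_IDX bit layout from mm_jpeg.h; the real interface builds the id elsewhere, so treat that line as an assumption.

    /* Illustrative reserve/release of a session slot (sketch only). */
    static void example_session_slot(mm_jpeg_obj *my_obj, int client_idx)
    {
        mm_jpeg_job_session_t *p_session = NULL;
        int session_idx = mm_jpeg_get_new_session_idx(my_obj, client_idx,
            &p_session);
        if (session_idx < 0) {
            return; /* no free session slot for this client */
        }
        /* ... configure and use p_session ... */

        /* Release the slot using an id that encodes client and session
         * indices in the same bit positions the GET_* macros expect. */
        uint32_t job_id = (uint32_t)((session_idx << 8) | client_idx);
        mm_jpeg_remove_session_idx(my_obj, job_id);
    }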
diff --git a/camera/QCamera2/stack/mm-jpeg-interface/inc/mm_jpeg_ionbuf.h b/camera/QCamera2/stack/mm-jpeg-interface/inc/mm_jpeg_ionbuf.h
new file mode 100644
index 0000000..2ac7145
--- /dev/null
+++ b/camera/QCamera2/stack/mm-jpeg-interface/inc/mm_jpeg_ionbuf.h
@@ -0,0 +1,94 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __MM_JPEG_IONBUF_H__
+#define __MM_JPEG_IONBUF_H__
+
+
+#include <stdio.h>
+#include <linux/msm_ion.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include "mm_jpeg_dbg.h"
+
+typedef struct  {
+  struct ion_fd_data ion_info_fd;
+  struct ion_allocation_data alloc;
+  int p_pmem_fd;
+  size_t size;
+  int ion_fd;
+  uint8_t *addr;
+} buffer_t;
+
+/** buffer_allocate:
+ *
+ *  Arguments:
+ *     @p_buffer: ION buffer
+ *
+ *  Return:
+ *     buffer address
+ *
+ *  Description:
+ *      allocates ION buffer
+ *
+ **/
+void* buffer_allocate(buffer_t *p_buffer, int cached);
+
+/** buffer_deallocate:
+ *
+ *  Arguments:
+ *     @p_buffer: ION buffer
+ *
+ *  Return:
+ *     error val
+ *
+ *  Description:
+ *      deallocates ION buffer
+ *
+ **/
+int buffer_deallocate(buffer_t *p_buffer);
+
+/** buffer_invalidate:
+ *
+ *  Arguments:
+ *     @p_buffer: ION buffer
+ *
+ *  Return:
+ *     error val
+ *
+ *  Description:
+ *      Invalidates the cached buffer
+ *
+ **/
+int buffer_invalidate(buffer_t *p_buffer);
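+
+/*
+ * Illustrative usage sketch (not part of the original change): a typical
+ * lifecycle for the ION buffer API declared above. Only the declarations in
+ * this header are used; the function name and error handling shown here are
+ * assumptions for illustration, kept out of the build with #if 0.
+ */
+#if 0
+static int example_work_buffer_lifecycle(size_t len)
+{
+  buffer_t wb = {0};
+  wb.size = len;
+
+  /* allocate and map a cached ION buffer */
+  if (NULL == buffer_allocate(&wb, 1)) {
+    return -1;
+  }
+
+  /* ... hardware writes its output into wb.addr ... */
+
+  /* invalidate the CPU cache before reading back the hardware output */
+  if (buffer_invalidate(&wb) < 0) {
+    buffer_deallocate(&wb);
+    return -1;
+  }
+
+  buffer_deallocate(&wb);
+  return 0;
+}
+#endif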
+
+#endif
+
diff --git a/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpeg.c b/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpeg.c
new file mode 100755
index 0000000..9fa8b89
--- /dev/null
+++ b/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpeg.c
@@ -0,0 +1,2996 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+
+#include <pthread.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/prctl.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <cutils/trace.h>
+#include <math.h>
+
+#include "mm_jpeg_dbg.h"
+#include "mm_jpeg_interface.h"
+#include "mm_jpeg.h"
+#include "mm_jpeg_inlines.h"
+
+#ifdef LOAD_ADSP_RPC_LIB
+#include <dlfcn.h>
+#include <stdlib.h>
+#endif
+
+#define ENCODING_MODE_PARALLEL 1
+
+#define META_KEYFILE QCAMERA_DUMP_FRM_LOCATION"metadata.key"
+
+/**
+ * resolution threshold above which the high speed encode mode is used
+ */
+#define MM_JPEG_MIN_NOM_RESOLUTION 7680000 /*8MP*/
+
+#ifdef MM_JPEG_USE_PIPELINE
+#undef MM_JPEG_CONCURRENT_SESSIONS_COUNT
+#define MM_JPEG_CONCURRENT_SESSIONS_COUNT 1
+#endif
+
+OMX_ERRORTYPE mm_jpeg_ebd(OMX_HANDLETYPE hComponent,
+    OMX_PTR pAppData,
+    OMX_BUFFERHEADERTYPE* pBuffer);
+OMX_ERRORTYPE mm_jpeg_fbd(OMX_HANDLETYPE hComponent,
+    OMX_PTR pAppData,
+    OMX_BUFFERHEADERTYPE* pBuffer);
+OMX_ERRORTYPE mm_jpeg_event_handler(OMX_HANDLETYPE hComponent,
+    OMX_PTR pAppData,
+    OMX_EVENTTYPE eEvent,
+    OMX_U32 nData1,
+    OMX_U32 nData2,
+    OMX_PTR pEventData);
+
+static int32_t mm_jpegenc_destroy_job(mm_jpeg_job_session_t *p_session);
+static void mm_jpegenc_job_done(mm_jpeg_job_session_t *p_session);
+mm_jpeg_job_q_node_t* mm_jpeg_queue_remove_job_by_dst_ptr(
+  mm_jpeg_queue_t* queue, void * dst_ptr);
+static OMX_ERRORTYPE mm_jpeg_session_configure(mm_jpeg_job_session_t *p_session);
+
+/** mm_jpeg_session_send_buffers:
+ *
+ *  Arguments:
+ *    @data: job session
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *       Send the buffers to OMX layer
+ *
+ **/
+OMX_ERRORTYPE mm_jpeg_session_send_buffers(void *data)
+{
+  uint32_t i = 0;
+  mm_jpeg_job_session_t* p_session = (mm_jpeg_job_session_t *)data;
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  QOMX_BUFFER_INFO lbuffer_info;
+  mm_jpeg_encode_params_t *p_params = &p_session->params;
+
+  memset(&lbuffer_info, 0x0, sizeof(QOMX_BUFFER_INFO));
+  for (i = 0; i < p_params->num_src_bufs; i++) {
+    CDBG("%s:%d] Source buffer %d", __func__, __LINE__, i);
+    lbuffer_info.fd = (OMX_U32)p_params->src_main_buf[i].fd;
+    ret = OMX_UseBuffer(p_session->omx_handle, &(p_session->p_in_omx_buf[i]), 0,
+      &lbuffer_info, p_params->src_main_buf[i].buf_size,
+      p_params->src_main_buf[i].buf_vaddr);
+    if (ret) {
+      CDBG_ERROR("%s:%d] Error %d", __func__, __LINE__, ret);
+      return ret;
+    }
+  }
+
+  for (i = 0; i < p_params->num_tmb_bufs; i++) {
+    CDBG("%s:%d] Source buffer %d", __func__, __LINE__, i);
+    lbuffer_info.fd = (OMX_U32)p_params->src_thumb_buf[i].fd;
+    ret = OMX_UseBuffer(p_session->omx_handle,
+        &(p_session->p_in_omx_thumb_buf[i]), 2,
+        &lbuffer_info, p_params->src_thumb_buf[i].buf_size,
+        p_params->src_thumb_buf[i].buf_vaddr);
+    if (ret) {
+      CDBG_ERROR("%s:%d] Error %d", __func__, __LINE__, ret);
+      return ret;
+    }
+  }
+
+  for (i = 0; i < p_params->num_dst_bufs; i++) {
+    CDBG("%s:%d] Dest buffer %d", __func__, __LINE__, i);
+    ret = OMX_UseBuffer(p_session->omx_handle, &(p_session->p_out_omx_buf[i]),
+      1, NULL, p_params->dest_buf[i].buf_size,
+      p_params->dest_buf[i].buf_vaddr);
+    if (ret) {
+      CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+      return ret;
+    }
+  }
+  CDBG("%s:%d]", __func__, __LINE__);
+  return ret;
+}
+
+
+/** mm_jpeg_session_free_buffers:
+ *
+ *  Arguments:
+ *    @data: job session
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *       Free the buffers from OMX layer
+ *
+ **/
+OMX_ERRORTYPE mm_jpeg_session_free_buffers(void *data)
+{
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  uint32_t i = 0;
+  mm_jpeg_job_session_t* p_session = (mm_jpeg_job_session_t *)data;
+  mm_jpeg_encode_params_t *p_params = &p_session->params;
+
+  for (i = 0; i < p_params->num_src_bufs; i++) {
+    CDBG("%s:%d] Source buffer %d", __func__, __LINE__, i);
+    ret = OMX_FreeBuffer(p_session->omx_handle, 0, p_session->p_in_omx_buf[i]);
+    if (ret) {
+      CDBG_ERROR("%s:%d] Error %d", __func__, __LINE__, ret);
+      return ret;
+    }
+  }
+
+  for (i = 0; i < p_params->num_tmb_bufs; i++) {
+    CDBG("%s:%d] Source buffer %d", __func__, __LINE__, i);
+    ret = OMX_FreeBuffer(p_session->omx_handle, 2, p_session->p_in_omx_thumb_buf[i]);
+    if (ret) {
+      CDBG_ERROR("%s:%d] Error %d", __func__, __LINE__, ret);
+      return ret;
+    }
+  }
+
+  for (i = 0; i < p_params->num_dst_bufs; i++) {
+    CDBG("%s:%d] Dest buffer %d", __func__, __LINE__, i);
+    ret = OMX_FreeBuffer(p_session->omx_handle, 1, p_session->p_out_omx_buf[i]);
+    if (ret) {
+      CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+      return ret;
+    }
+  }
+  CDBG("%s:%d]", __func__, __LINE__);
+  return ret;
+}
+
+
+
+
+/** mm_jpeg_session_change_state:
+ *
+ *  Arguments:
+ *    @p_session: job session
+ *    @new_state: new state to be transitioned to
+ *    @p_exec: transition function
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *       This method is used for state transition
+ *
+ **/
+OMX_ERRORTYPE mm_jpeg_session_change_state(mm_jpeg_job_session_t* p_session,
+  OMX_STATETYPE new_state,
+  mm_jpeg_transition_func_t p_exec)
+{
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  OMX_STATETYPE current_state;
+  CDBG("%s:%d] new_state %d p_exec %p", __func__, __LINE__,
+    new_state, p_exec);
+
+
+  pthread_mutex_lock(&p_session->lock);
+
+  ret = OMX_GetState(p_session->omx_handle, &current_state);
+
+  if (ret) {
+    pthread_mutex_unlock(&p_session->lock);
+    return ret;
+  }
+
+  if (current_state == new_state) {
+    pthread_mutex_unlock(&p_session->lock);
+    return OMX_ErrorNone;
+  }
+
+  p_session->state_change_pending = OMX_TRUE;
+  pthread_mutex_unlock(&p_session->lock);
+  ret = OMX_SendCommand(p_session->omx_handle, OMX_CommandStateSet,
+    new_state, NULL);
+  pthread_mutex_lock(&p_session->lock);
+  if (ret) {
+    CDBG_ERROR("%s:%d] Error %d", __func__, __LINE__, ret);
+    pthread_mutex_unlock(&p_session->lock);
+    return OMX_ErrorIncorrectStateTransition;
+  }
+  CDBG("%s:%d] ", __func__, __LINE__);
+  if ((OMX_ErrorNone != p_session->error_flag) &&
+      (OMX_ErrorOverflow != p_session->error_flag)) {
+    CDBG_ERROR("%s:%d] Error %d", __func__, __LINE__, p_session->error_flag);
+    pthread_mutex_unlock(&p_session->lock);
+    return p_session->error_flag;
+  }
+  if (p_exec) {
+    ret = p_exec(p_session);
+    if (ret) {
+      CDBG_ERROR("%s:%d] Error %d", __func__, __LINE__, ret);
+      pthread_mutex_unlock(&p_session->lock);
+      return ret;
+    }
+  }
+  CDBG("%s:%d] ", __func__, __LINE__);
+  if (p_session->state_change_pending) {
+    CDBG("%s:%d] before wait", __func__, __LINE__);
+    pthread_cond_wait(&p_session->cond, &p_session->lock);
+    CDBG("%s:%d] after wait", __func__, __LINE__);
+  }
+  pthread_mutex_unlock(&p_session->lock);
+  CDBG("%s:%d] ", __func__, __LINE__);
+  return ret;
+}
+
+/** mm_jpeg_session_create:
+ *
+ *  Arguments:
+ *    @p_session: job session
+ *
+ *  Return:
+ *       OMX error types
+ *
+ *  Description:
+ *       Create a jpeg encode session
+ *
+ **/
+OMX_ERRORTYPE mm_jpeg_session_create(mm_jpeg_job_session_t* p_session)
+{
+  OMX_ERRORTYPE rc = OMX_ErrorNone;
+  mm_jpeg_obj *my_obj = (mm_jpeg_obj *) p_session->jpeg_obj;
+  char *omx_lib = "OMX.qcom.image.jpeg.encoder";
+
+  pthread_mutex_init(&p_session->lock, NULL);
+  pthread_cond_init(&p_session->cond, NULL);
+  cirq_reset(&p_session->cb_q);
+  p_session->state_change_pending = OMX_FALSE;
+  p_session->abort_state = MM_JPEG_ABORT_NONE;
+  p_session->error_flag = OMX_ErrorNone;
+  p_session->ebd_count = 0;
+  p_session->fbd_count = 0;
+  p_session->encode_pid = -1;
+  p_session->config = OMX_FALSE;
+  p_session->exif_count_local = 0;
+  p_session->auto_out_buf = OMX_FALSE;
+
+  p_session->omx_callbacks.EmptyBufferDone = mm_jpeg_ebd;
+  p_session->omx_callbacks.FillBufferDone = mm_jpeg_fbd;
+  p_session->omx_callbacks.EventHandler = mm_jpeg_event_handler;
+
+  p_session->thumb_from_main = 0;
+#ifdef MM_JPEG_USE_PIPELINE
+  p_session->thumb_from_main = 1;
+  omx_lib = "OMX.qcom.image.jpeg.encoder_pipeline";
+#endif
+
+  rc = OMX_GetHandle(&p_session->omx_handle,
+      omx_lib,
+      (void *)p_session,
+      &p_session->omx_callbacks);
+  if (OMX_ErrorNone != rc) {
+    CDBG_ERROR("%s:%d] OMX_GetHandle failed (%d)", __func__, __LINE__, rc);
+    return rc;
+  }
+
+  my_obj->num_sessions++;
+
+  return rc;
+}
+
+
+
+/** mm_jpeg_session_destroy:
+ *
+ *  Arguments:
+ *    @p_session: job session
+ *
+ *  Return:
+ *       none
+ *
+ *  Description:
+ *       Destroy a jpeg encode session
+ *
+ **/
+void mm_jpeg_session_destroy(mm_jpeg_job_session_t* p_session)
+{
+  OMX_ERRORTYPE rc = OMX_ErrorNone;
+  OMX_STATETYPE state;
+  mm_jpeg_obj *my_obj = (mm_jpeg_obj *) p_session->jpeg_obj;
+
+  CDBG("%s:%d] E", __func__, __LINE__);
+  if (NULL == p_session->omx_handle) {
+    CDBG_ERROR("%s:%d] invalid handle", __func__, __LINE__);
+    return;
+  }
+
+  rc = OMX_GetState(p_session->omx_handle, &state);
+
+  //Check state before state transition
+  if ((state == OMX_StateExecuting) || (state == OMX_StatePause)) {
+    rc = mm_jpeg_session_change_state(p_session, OMX_StateIdle, NULL);
+    if (rc) {
+      CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+    }
+  }
+
+  rc = OMX_GetState(p_session->omx_handle, &state);
+
+  if (state == OMX_StateIdle) {
+    rc = mm_jpeg_session_change_state(p_session, OMX_StateLoaded,
+      mm_jpeg_session_free_buffers);
+    if (rc) {
+      CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+    }
+  }
+
+  rc = OMX_FreeHandle(p_session->omx_handle);
+  if (0 != rc) {
+    CDBG_ERROR("%s:%d] OMX_FreeHandle failed (%d)", __func__, __LINE__, rc);
+  }
+  p_session->omx_handle = NULL;
+
+  pthread_mutex_destroy(&p_session->lock);
+  pthread_cond_destroy(&p_session->cond);
+
+  if (NULL != p_session->meta_enc_key) {
+    free(p_session->meta_enc_key);
+    p_session->meta_enc_key = NULL;
+  }
+
+  my_obj->num_sessions--;
+
+  // Destroy next session
+  if (p_session->next_session) {
+    mm_jpeg_session_destroy(p_session->next_session);
+  }
+
+  CDBG_HIGH("%s:%d] Session destroy successful. X", __func__, __LINE__);
+}
+
+
+
+/** mm_jpeg_session_config_main_buffer_offset:
+ *
+ *  Arguments:
+ *    @p_session: job session
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *       Configure the buffer offsets
+ *
+ **/
+OMX_ERRORTYPE mm_jpeg_session_config_main_buffer_offset(
+  mm_jpeg_job_session_t* p_session)
+{
+  OMX_ERRORTYPE rc = 0;
+  OMX_INDEXTYPE buffer_index;
+  QOMX_YUV_FRAME_INFO frame_info;
+  size_t totalSize = 0;
+  mm_jpeg_encode_params_t *p_params = &p_session->params;
+
+  mm_jpeg_buf_t *p_src_buf =
+    &p_params->src_main_buf[0];
+
+  memset(&frame_info, 0x0, sizeof(QOMX_YUV_FRAME_INFO));
+
+  frame_info.cbcrStartOffset[0] = p_src_buf->offset.mp[0].len;
+  frame_info.cbcrStartOffset[1] = p_src_buf->offset.mp[1].len;
+  frame_info.yOffset = p_src_buf->offset.mp[0].offset;
+  frame_info.cbcrOffset[0] = p_src_buf->offset.mp[1].offset;
+  frame_info.cbcrOffset[1] = p_src_buf->offset.mp[2].offset;
+  totalSize = p_src_buf->buf_size;
+
+  rc = OMX_GetExtensionIndex(p_session->omx_handle,
+    QOMX_IMAGE_EXT_BUFFER_OFFSET_NAME, &buffer_index);
+  if (rc != OMX_ErrorNone) {
+    CDBG_ERROR("%s:%d] Failed", __func__, __LINE__);
+    return rc;
+  }
+
+  CDBG_HIGH("%s:%d] yOffset = %d, cbcrOffset = (%d %d), totalSize = %zd,"
+    "cbcrStartOffset = (%d %d)", __func__, __LINE__,
+    (int)frame_info.yOffset,
+    (int)frame_info.cbcrOffset[0],
+    (int)frame_info.cbcrOffset[1],
+    totalSize,
+    (int)frame_info.cbcrStartOffset[0],
+    (int)frame_info.cbcrStartOffset[1]);
+
+  rc = OMX_SetParameter(p_session->omx_handle, buffer_index, &frame_info);
+  if (rc != OMX_ErrorNone) {
+    CDBG_ERROR("%s:%d] Failed", __func__, __LINE__);
+    return rc;
+  }
+  return rc;
+}
+
+/** mm_jpeg_encoding_mode:
+ *
+ *  Arguments:
+ *    @p_session: job session
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *       Configure the serial or parallel encoding
+ *       mode
+ *
+ **/
+OMX_ERRORTYPE mm_jpeg_encoding_mode(
+  mm_jpeg_job_session_t* p_session)
+{
+  OMX_ERRORTYPE rc = 0;
+  OMX_INDEXTYPE indextype;
+  QOMX_ENCODING_MODE encoding_mode;
+
+  rc = OMX_GetExtensionIndex(p_session->omx_handle,
+    QOMX_IMAGE_EXT_ENCODING_MODE_NAME, &indextype);
+  if (rc != OMX_ErrorNone) {
+    CDBG_ERROR("%s:%d] Failed", __func__, __LINE__);
+    return rc;
+  }
+
+  if (ENCODING_MODE_PARALLEL) {
+    encoding_mode = OMX_Parallel_Encoding;
+  } else {
+    encoding_mode = OMX_Serial_Encoding;
+  }
+  CDBG("%s:%d] encoding mode = %d ", __func__, __LINE__,
+    (int)encoding_mode);
+  rc = OMX_SetParameter(p_session->omx_handle, indextype, &encoding_mode);
+  if (rc != OMX_ErrorNone) {
+    CDBG_ERROR("%s:%d] Failed", __func__, __LINE__);
+    return rc;
+  }
+  return rc;
+}
+
+/** mm_jpeg_get_speed:
+ *
+ *  Arguments:
+ *    @p_session: job session
+ *
+ *  Return:
+ *       ops speed type for jpeg
+ *
+ *  Description:
+ *      Configure normal or high speed jpeg
+ *
+ **/
+QOMX_JPEG_SPEED_MODE mm_jpeg_get_speed(
+  mm_jpeg_job_session_t* p_session)
+{
+  mm_jpeg_encode_params_t *p_params = &p_session->params;
+  cam_dimension_t *p_dim = &p_params->main_dim.src_dim;
+  if (p_params->burst_mode ||
+    (MM_JPEG_MIN_NOM_RESOLUTION < (p_dim->width * p_dim->height))) {
+    return QOMX_JPEG_SPEED_MODE_HIGH;
+  }
+  return QOMX_JPEG_SPEED_MODE_NORMAL;
+}
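+
+/*
+ * Example (illustrative only): a 4000x3000 snapshot (12,000,000 pixels)
+ * exceeds MM_JPEG_MIN_NOM_RESOLUTION (7,680,000), so high speed mode is
+ * selected even without burst_mode; a 1920x1080 frame (2,073,600 pixels)
+ * stays in normal speed mode.
+ */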
+
+/** mm_jpeg_speed_mode:
+ *
+ *  Arguments:
+ *    @p_session: job session
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *      Configure normal or high speed jpeg
+ *
+ **/
+OMX_ERRORTYPE mm_jpeg_speed_mode(
+  mm_jpeg_job_session_t* p_session)
+{
+  OMX_ERRORTYPE rc = 0;
+  OMX_INDEXTYPE indextype;
+  QOMX_JPEG_SPEED jpeg_speed;
+
+  rc = OMX_GetExtensionIndex(p_session->omx_handle,
+    QOMX_IMAGE_EXT_JPEG_SPEED_NAME, &indextype);
+  if (rc != OMX_ErrorNone) {
+    CDBG_ERROR("%s:%d] Failed", __func__, __LINE__);
+    return rc;
+  }
+
+  jpeg_speed.speedMode = mm_jpeg_get_speed(p_session);
+  CDBG_HIGH("%s:%d] speed %d", __func__, __LINE__, jpeg_speed.speedMode);
+
+  rc = OMX_SetParameter(p_session->omx_handle, indextype, &jpeg_speed);
+  if (rc != OMX_ErrorNone) {
+    CDBG_ERROR("%s:%d] Failed", __func__, __LINE__);
+    return rc;
+  }
+  return rc;
+}
+
+
+/** mm_jpeg_mem_ops:
+ *
+ *  Arguments:
+ *    @p_session: job session
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *       Register the client's get_memory callback
+ *       with the OMX component
+ *
+ **/
+OMX_ERRORTYPE mm_jpeg_mem_ops(
+  mm_jpeg_job_session_t* p_session)
+{
+  OMX_ERRORTYPE rc = 0;
+  OMX_INDEXTYPE indextype;
+  QOMX_MEM_OPS mem_ops;
+  mm_jpeg_encode_params_t *p_params = &p_session->params;
+
+  mem_ops.get_memory = p_params->get_memory;
+
+  rc = OMX_GetExtensionIndex(p_session->omx_handle,
+    QOMX_IMAGE_EXT_MEM_OPS_NAME, &indextype);
+  if (rc != OMX_ErrorNone) {
+    CDBG_ERROR("%s:%d] Failed", __func__, __LINE__);
+    return rc;
+  }
+
+  rc = OMX_SetParameter(p_session->omx_handle, indextype, &mem_ops);
+  if (rc != OMX_ErrorNone) {
+    CDBG_ERROR("%s:%d] Failed", __func__, __LINE__);
+    return rc;
+  }
+  return rc;
+}
+
+/** mm_jpeg_metadata:
+ *
+ *  Arguments:
+ *    @p_session: job session
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *       Pass meta data
+ *
+ **/
+OMX_ERRORTYPE mm_jpeg_metadata(
+  mm_jpeg_job_session_t* p_session)
+{
+  OMX_ERRORTYPE rc = OMX_ErrorNone;
+  OMX_INDEXTYPE indexType;
+  mm_jpeg_encode_job_t *p_jobparams = &p_session->encode_job;
+  QOMX_METADATA lMeta;
+
+  rc = OMX_GetExtensionIndex(p_session->omx_handle,
+      QOMX_IMAGE_EXT_METADATA_NAME, &indexType);
+
+  if (rc != OMX_ErrorNone) {
+    CDBG_ERROR("%s:%d] Failed", __func__, __LINE__);
+    return rc;
+  }
+
+  lMeta.metadata = (OMX_U8 *)p_jobparams->p_metadata;
+  lMeta.metaPayloadSize = sizeof(*p_jobparams->p_metadata);
+  lMeta.mobicat_mask = p_jobparams->mobicat_mask;
+
+  rc = OMX_SetConfig(p_session->omx_handle, indexType, &lMeta);
+  if (rc != OMX_ErrorNone) {
+    CDBG_ERROR("%s:%d] Failed", __func__, __LINE__);
+    return rc;
+  }
+  return OMX_ErrorNone;
+}
+
+/** mm_jpeg_meta_enc_key:
+ *
+ *  Arguments:
+ *    @p_session: job session
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *       Pass metadata encrypt key
+ *
+ **/
+OMX_ERRORTYPE mm_jpeg_meta_enc_key(
+  mm_jpeg_job_session_t* p_session)
+{
+  OMX_ERRORTYPE rc = OMX_ErrorNone;
+  OMX_INDEXTYPE indexType;
+  QOMX_META_ENC_KEY lKey;
+
+  lKey.metaKey = p_session->meta_enc_key;
+  lKey.keyLen = p_session->meta_enc_keylen;
+
+  if ((!lKey.metaKey) || (!lKey.keyLen)){
+    CDBG_ERROR("%s:%d] Key is invalid", __func__, __LINE__);
+    return OMX_ErrorNone;
+  }
+
+  rc = OMX_GetExtensionIndex(p_session->omx_handle,
+      QOMX_IMAGE_EXT_META_ENC_KEY_NAME, &indexType);
+
+  if (rc != OMX_ErrorNone) {
+    CDBG_ERROR("%s:%d] Failed", __func__, __LINE__);
+    return rc;
+  }
+
+  rc = OMX_SetConfig(p_session->omx_handle, indexType, &lKey);
+  if (rc != OMX_ErrorNone) {
+    CDBG_ERROR("%s:%d] Failed", __func__, __LINE__);
+    return rc;
+  }
+  return OMX_ErrorNone;
+}
+
+/** map_jpeg_format:
+ *
+ *  Arguments:
+ *    @color_fmt: color format
+ *
+ *  Return:
+ *       OMX color format
+ *
+ *  Description:
+ *       Map mmjpeg color format to OMX color format
+ *
+ **/
+int map_jpeg_format(mm_jpeg_color_format color_fmt)
+{
+  switch (color_fmt) {
+  case MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V2:
+    return (int)OMX_QCOM_IMG_COLOR_FormatYVU420SemiPlanar;
+  case MM_JPEG_COLOR_FORMAT_YCBCRLP_H2V2:
+    return (int)OMX_COLOR_FormatYUV420SemiPlanar;
+  case MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V1:
+    return (int)OMX_QCOM_IMG_COLOR_FormatYVU422SemiPlanar;
+  case MM_JPEG_COLOR_FORMAT_YCBCRLP_H2V1:
+    return (int)OMX_COLOR_FormatYUV422SemiPlanar;
+  case MM_JPEG_COLOR_FORMAT_YCRCBLP_H1V2:
+    return (int)OMX_QCOM_IMG_COLOR_FormatYVU422SemiPlanar_h1v2;
+  case MM_JPEG_COLOR_FORMAT_YCBCRLP_H1V2:
+    return (int)OMX_QCOM_IMG_COLOR_FormatYUV422SemiPlanar_h1v2;
+  case MM_JPEG_COLOR_FORMAT_YCRCBLP_H1V1:
+    return (int)OMX_QCOM_IMG_COLOR_FormatYVU444SemiPlanar;
+  case MM_JPEG_COLOR_FORMAT_YCBCRLP_H1V1:
+    return (int)OMX_QCOM_IMG_COLOR_FormatYUV444SemiPlanar;
+  case MM_JPEG_COLOR_FORMAT_MONOCHROME:
+     return (int)OMX_COLOR_FormatMonochrome;
+  default:
+    CDBG_ERROR("%s:%d] invalid format %d", __func__, __LINE__, color_fmt);
+    return (int)OMX_QCOM_IMG_COLOR_FormatYVU420SemiPlanar;
+  }
+}
+
+/** mm_jpeg_session_config_ports:
+ *
+ *  Arguments:
+ *    @p_session: job session
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *       Configure OMX ports
+ *
+ **/
+OMX_ERRORTYPE mm_jpeg_session_config_ports(mm_jpeg_job_session_t* p_session)
+{
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  mm_jpeg_encode_params_t *p_params = &p_session->params;
+  OMX_CONFIG_ROTATIONTYPE rotate;
+
+  mm_jpeg_buf_t *p_src_buf =
+    &p_params->src_main_buf[0];
+
+  p_session->inputPort.nPortIndex = 0;
+  p_session->outputPort.nPortIndex = 1;
+  p_session->inputTmbPort.nPortIndex = 2;
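+  /* Port layout used throughout this file: 0 = main image input,
+     1 = compressed JPEG output, 2 = thumbnail input. */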
+
+  ret = OMX_GetParameter(p_session->omx_handle, OMX_IndexParamPortDefinition,
+    &p_session->inputPort);
+  if (ret) {
+    CDBG_ERROR("%s:%d] failed", __func__, __LINE__);
+    return ret;
+  }
+
+  ret = OMX_GetParameter(p_session->omx_handle, OMX_IndexParamPortDefinition,
+    &p_session->inputTmbPort);
+  if (ret) {
+    CDBG_ERROR("%s:%d] failed", __func__, __LINE__);
+    return ret;
+  }
+
+  ret = OMX_GetParameter(p_session->omx_handle, OMX_IndexParamPortDefinition,
+    &p_session->outputPort);
+  if (ret) {
+    CDBG_ERROR("%s:%d] failed", __func__, __LINE__);
+    return ret;
+  }
+
+  p_session->inputPort.format.image.nFrameWidth =
+    (OMX_U32)p_params->main_dim.src_dim.width;
+  p_session->inputPort.format.image.nFrameHeight =
+    (OMX_U32)p_params->main_dim.src_dim.height;
+  p_session->inputPort.format.image.nStride =
+    p_src_buf->offset.mp[0].stride;
+  p_session->inputPort.format.image.nSliceHeight =
+    (OMX_U32)p_src_buf->offset.mp[0].scanline;
+  p_session->inputPort.format.image.eColorFormat =
+    map_jpeg_format(p_params->color_format);
+  p_session->inputPort.nBufferSize =
+    p_params->src_main_buf[0/*p_jobparams->src_index*/].buf_size;
+  p_session->inputPort.nBufferCountActual = (OMX_U32)p_params->num_src_bufs;
+  ret = OMX_SetParameter(p_session->omx_handle, OMX_IndexParamPortDefinition,
+    &p_session->inputPort);
+  if (ret) {
+    CDBG_ERROR("%s:%d] failed", __func__, __LINE__);
+    return ret;
+  }
+
+  if (p_session->params.encode_thumbnail) {
+    mm_jpeg_buf_t *p_tmb_buf =
+      &p_params->src_thumb_buf[0];
+    p_session->inputTmbPort.format.image.nFrameWidth =
+      (OMX_U32)p_params->thumb_dim.src_dim.width;
+    p_session->inputTmbPort.format.image.nFrameHeight =
+      (OMX_U32)p_params->thumb_dim.src_dim.height;
+    p_session->inputTmbPort.format.image.nStride =
+      p_tmb_buf->offset.mp[0].stride;
+    p_session->inputTmbPort.format.image.nSliceHeight =
+      (OMX_U32)p_tmb_buf->offset.mp[0].scanline;
+    p_session->inputTmbPort.format.image.eColorFormat =
+      map_jpeg_format(p_params->thumb_color_format);
+    p_session->inputTmbPort.nBufferSize =
+      p_params->src_thumb_buf[0].buf_size;
+    p_session->inputTmbPort.nBufferCountActual = (OMX_U32)p_params->num_tmb_bufs;
+
+    ret = OMX_SetParameter(p_session->omx_handle, OMX_IndexParamPortDefinition,
+      &p_session->inputTmbPort);
+
+    if (ret) {
+      CDBG_ERROR("%s:%d] failed", __func__, __LINE__);
+      return ret;
+    }
+
+    // Enable thumbnail port
+    ret = OMX_SendCommand(p_session->omx_handle, OMX_CommandPortEnable,
+        p_session->inputTmbPort.nPortIndex, NULL);
+
+    if (ret) {
+      CDBG_ERROR("%s:%d] failed", __func__, __LINE__);
+      return ret;
+    }
+  } else {
+    // Disable thumbnail port
+    ret = OMX_SendCommand(p_session->omx_handle, OMX_CommandPortDisable,
+        p_session->inputTmbPort.nPortIndex, NULL);
+
+    if (ret) {
+      CDBG_ERROR("%s:%d] failed", __func__, __LINE__);
+      return ret;
+    }
+  }
+
+  p_session->outputPort.nBufferSize =
+    p_params->dest_buf[0].buf_size;
+  p_session->outputPort.nBufferCountActual = (OMX_U32)p_params->num_dst_bufs;
+  ret = OMX_SetParameter(p_session->omx_handle, OMX_IndexParamPortDefinition,
+    &p_session->outputPort);
+  if (ret) {
+    CDBG_ERROR("%s:%d] failed", __func__, __LINE__);
+    return ret;
+  }
+
+  /* set rotation */
+  memset(&rotate, 0, sizeof(rotate));
+  rotate.nPortIndex = 1;
+  rotate.nRotation = (OMX_S32)p_params->rotation;
+  ret = OMX_SetConfig(p_session->omx_handle, OMX_IndexConfigCommonRotate,
+      &rotate);
+  if (OMX_ErrorNone != ret) {
+    CDBG_ERROR("%s:%d] Error %d", __func__, __LINE__, ret);
+    return ret;
+  }
+  CDBG("%s:%d] Set rotation to %d at port_idx = %d", __func__, __LINE__,
+      (int)p_params->rotation, (int)rotate.nPortIndex);
+
+  return ret;
+}
+
+/** mm_jpeg_get_thumbnail_crop
+ *
+ *  Arguments:
+ *    @p_thumb_dim: thumbnail dimension
+ *    @p_main_dim: main image dimension
+ *    @crop_width : flag indicating if width needs to be cropped
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *    If the main image and thumbnail aspect ratios are different,
+ *    re-calculate the thumbnail crop info to prevent distortion
+ *
+ */
+OMX_ERRORTYPE mm_jpeg_get_thumbnail_crop(mm_jpeg_dim_t *p_thumb_dim,
+  mm_jpeg_dim_t *p_main_dim, uint8_t crop_width) {
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  int cropped_width = 0, cropped_height = 0;
+
+  if (crop_width) {
+   //Keep height constant
+   cropped_height = p_thumb_dim->src_dim.height;
+   cropped_width = floor((cropped_height * p_thumb_dim->dst_dim.width) /
+      p_thumb_dim->dst_dim.height);
+  } else {
+    //Keep width constant
+    cropped_width = p_thumb_dim->src_dim.width;
+    cropped_height = floor((cropped_width * p_thumb_dim->dst_dim.height) /
+      p_thumb_dim->dst_dim.width);
+  }
+  p_thumb_dim->crop.left = floor(p_thumb_dim->src_dim.width - cropped_width) / 2;
+  p_thumb_dim->crop.top = floor(p_thumb_dim->src_dim.height - cropped_height) / 2;
+  p_thumb_dim->crop.width = cropped_width;
+  p_thumb_dim->crop.height = cropped_height;
+
+  CDBG_HIGH("%s %d New thumbnail crop: left %d, top %d, crop width %d, crop height %d",
+    __func__, __LINE__, p_thumb_dim->crop.left, p_thumb_dim->crop.top,
+    p_thumb_dim->crop.width, p_thumb_dim->crop.height);
+
+  return ret;
+}
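+
+/*
+ * Worked example (illustrative only): with a 1920x1080 thumbnail source and a
+ * 320x320 (1:1) destination, crop_width is set, so the height stays at 1080
+ * and cropped_width = floor((1080 * 320) / 320) = 1080. The resulting crop is
+ * left = (1920 - 1080) / 2 = 420, top = 0, 1080x1080, i.e. a centered square
+ * matching the destination aspect ratio.
+ */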
+
+/** mm_jpeg_session_config_thumbnail:
+ *
+ *  Arguments:
+ *    @p_session: job session
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *       Configure the thumbnail image
+ *
+ **/
+OMX_ERRORTYPE mm_jpeg_session_config_thumbnail(mm_jpeg_job_session_t* p_session)
+{
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  QOMX_THUMBNAIL_INFO thumbnail_info;
+  OMX_INDEXTYPE thumb_indextype;
+  mm_jpeg_encode_params_t *p_params = &p_session->params;
+  mm_jpeg_encode_job_t *p_jobparams = &p_session->encode_job;
+  mm_jpeg_dim_t *p_thumb_dim = &p_jobparams->thumb_dim;
+  mm_jpeg_dim_t *p_main_dim = &p_jobparams->main_dim;
+  QOMX_YUV_FRAME_INFO *p_frame_info = &thumbnail_info.tmbOffset;
+  mm_jpeg_buf_t *p_tmb_buf = &p_params->src_thumb_buf[p_jobparams->thumb_index];
+
+  CDBG_HIGH("%s:%d] encode_thumbnail %u", __func__, __LINE__,
+    p_params->encode_thumbnail);
+  if (OMX_FALSE == p_params->encode_thumbnail) {
+    return ret;
+  }
+
+  if ((p_thumb_dim->dst_dim.width == 0) || (p_thumb_dim->dst_dim.height == 0)) {
+    CDBG_ERROR("%s:%d] Error invalid output dim for thumbnail",
+      __func__, __LINE__);
+    return OMX_ErrorBadParameter;
+  }
+
+  if ((p_thumb_dim->src_dim.width == 0) || (p_thumb_dim->src_dim.height == 0)) {
+    CDBG_ERROR("%s:%d] Error invalid input dim for thumbnail",
+      __func__, __LINE__);
+    return OMX_ErrorBadParameter;
+  }
+
+  if ((p_thumb_dim->crop.width == 0) || (p_thumb_dim->crop.height == 0)) {
+    p_thumb_dim->crop.width = p_thumb_dim->src_dim.width;
+    p_thumb_dim->crop.height = p_thumb_dim->src_dim.height;
+  }
+
+  /* check crop boundary */
+  if ((p_thumb_dim->crop.width + p_thumb_dim->crop.left > p_thumb_dim->src_dim.width) ||
+    (p_thumb_dim->crop.height + p_thumb_dim->crop.top > p_thumb_dim->src_dim.height)) {
+    CDBG_ERROR("%s:%d] invalid crop boundary (%d, %d) offset (%d, %d) out of (%d, %d)",
+      __func__, __LINE__,
+      p_thumb_dim->crop.width,
+      p_thumb_dim->crop.height,
+      p_thumb_dim->crop.left,
+      p_thumb_dim->crop.top,
+      p_thumb_dim->src_dim.width,
+      p_thumb_dim->src_dim.height);
+    return OMX_ErrorBadParameter;
+  }
+
+  memset(&thumbnail_info, 0x0, sizeof(QOMX_THUMBNAIL_INFO));
+  ret = OMX_GetExtensionIndex(p_session->omx_handle,
+    QOMX_IMAGE_EXT_THUMBNAIL_NAME,
+    &thumb_indextype);
+  if (ret) {
+    CDBG_ERROR("%s:%d] Error %d", __func__, __LINE__, ret);
+    return ret;
+  }
+
+  /* fill thumbnail info */
+  thumbnail_info.scaling_enabled = 1;
+  thumbnail_info.input_width = (OMX_U32)p_thumb_dim->src_dim.width;
+  thumbnail_info.input_height = (OMX_U32)p_thumb_dim->src_dim.height;
+  thumbnail_info.rotation = (OMX_U32)p_params->thumb_rotation;
+  thumbnail_info.quality = (OMX_U32)p_params->thumb_quality;
+  thumbnail_info.output_width = (OMX_U32)p_thumb_dim->dst_dim.width;
+  thumbnail_info.output_height = (OMX_U32)p_thumb_dim->dst_dim.height;
+
+  if (p_session->thumb_from_main) {
+    if ((p_session->params.thumb_rotation == 90 ||
+      p_session->params.thumb_rotation == 270) &&
+      (p_session->params.rotation == 0 ||
+      p_session->params.rotation == 180)) {
+
+      thumbnail_info.output_width = (OMX_U32)p_thumb_dim->dst_dim.height;
+      thumbnail_info.output_height = (OMX_U32)p_thumb_dim->dst_dim.width;
+      thumbnail_info.rotation = p_session->params.rotation;
+    }
+  } else if ((p_thumb_dim->dst_dim.width > p_thumb_dim->src_dim.width) ||
+    (p_thumb_dim->dst_dim.height > p_thumb_dim->src_dim.height)) {
+    CDBG_ERROR("%s:%d] Incorrect thumbnail dim %dx%d resetting to %dx%d",
+      __func__, __LINE__, p_thumb_dim->dst_dim.width,
+      p_thumb_dim->dst_dim.height, p_thumb_dim->src_dim.width,
+      p_thumb_dim->src_dim.height);
+    thumbnail_info.output_width = (OMX_U32)p_thumb_dim->src_dim.width;
+    thumbnail_info.output_height = (OMX_U32)p_thumb_dim->src_dim.height;
+  }
+
+  //If the main image and thumbnail aspect ratios are different, reset the
+  // thumbnail crop info to avoid distortion
+  double main_aspect_ratio = (double)p_main_dim->dst_dim.width /
+    (double)p_main_dim->dst_dim.height;
+  double thumb_aspect_ratio = (double)thumbnail_info.output_width /
+    (double)thumbnail_info.output_height;
+
+  if ((thumb_aspect_ratio - main_aspect_ratio) > ASPECT_TOLERANCE) {
+    mm_jpeg_get_thumbnail_crop(p_thumb_dim, p_main_dim, 0);
+  } else if((main_aspect_ratio - thumb_aspect_ratio) > ASPECT_TOLERANCE){
+    mm_jpeg_get_thumbnail_crop(p_thumb_dim, p_main_dim, 1);
+  }
+
+  //Fill thumbnail crop info
+  thumbnail_info.crop_info.nWidth = (OMX_U32)p_thumb_dim->crop.width;
+  thumbnail_info.crop_info.nHeight = (OMX_U32)p_thumb_dim->crop.height;
+  thumbnail_info.crop_info.nLeft = p_thumb_dim->crop.left;
+  thumbnail_info.crop_info.nTop = p_thumb_dim->crop.top;
+
+  //If main image cropping/scaling is enabled, thumb FOV should be within
+  //main image FOV
+  if ((p_main_dim->crop.width != p_main_dim->src_dim.width) ||
+    (p_main_dim->crop.height != p_main_dim->src_dim.height)) {
+    if ((p_thumb_dim->crop.left < p_main_dim->crop.left) ||
+      ((p_thumb_dim->crop.left + p_thumb_dim->crop.width) >
+      (p_main_dim->crop.left + p_main_dim->crop.width)) ||
+      (p_thumb_dim->crop.top < p_main_dim->crop.top) ||
+      ((p_thumb_dim->crop.top + p_thumb_dim->crop.height) >
+      (p_main_dim->crop.top + p_main_dim->crop.height))) {
+       //Reset the FOV for the thumbnail
+       CDBG_HIGH("%s:%d] Resetting the thumbnail FOV wrt main image",
+         __func__, __LINE__);
+       thumbnail_info.crop_info.nLeft = p_main_dim->crop.left;
+       thumbnail_info.crop_info.nTop = p_main_dim->crop.top;
+       if ((p_thumb_dim->crop.width > p_main_dim->crop.width) ||
+         (p_thumb_dim->crop.height > p_main_dim->crop.height)) {
+         thumbnail_info.crop_info.nWidth = p_main_dim->crop.width;
+         thumbnail_info.crop_info.nHeight = p_main_dim->crop.height;
+       }
+     }
+  }
+
+  memset(p_frame_info, 0x0, sizeof(*p_frame_info));
+
+  p_frame_info->cbcrStartOffset[0] = p_tmb_buf->offset.mp[0].len;
+  p_frame_info->cbcrStartOffset[1] = p_tmb_buf->offset.mp[1].len;
+  p_frame_info->yOffset = p_tmb_buf->offset.mp[0].offset;
+  p_frame_info->cbcrOffset[0] = p_tmb_buf->offset.mp[1].offset;
+  p_frame_info->cbcrOffset[1] = p_tmb_buf->offset.mp[2].offset;
+
+  ret = OMX_SetConfig(p_session->omx_handle, thumb_indextype,
+    &thumbnail_info);
+  if (ret) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+    return ret;
+  }
+
+  return ret;
+}
+
+/** mm_jpeg_session_config_main_crop:
+ *
+ *  Arguments:
+ *    @p_session: job session
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *       Configure main image crop
+ *
+ **/
+OMX_ERRORTYPE mm_jpeg_session_config_main_crop(mm_jpeg_job_session_t *p_session)
+{
+  OMX_CONFIG_RECTTYPE rect_type_in, rect_type_out;
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  mm_jpeg_encode_job_t *p_jobparams = &p_session->encode_job;
+  mm_jpeg_dim_t *dim = &p_jobparams->main_dim;
+
+  if ((dim->crop.width == 0) || (dim->crop.height == 0)) {
+    dim->crop.width = dim->src_dim.width;
+    dim->crop.height = dim->src_dim.height;
+  }
+  /* error check first */
+  if ((dim->crop.width + dim->crop.left > dim->src_dim.width) ||
+    (dim->crop.height + dim->crop.top > dim->src_dim.height)) {
+    CDBG_ERROR("%s:%d] invalid crop boundary (%d, %d) out of (%d, %d)",
+      __func__, __LINE__,
+      dim->crop.width + dim->crop.left,
+      dim->crop.height + dim->crop.top,
+      dim->src_dim.width,
+      dim->src_dim.height);
+    return OMX_ErrorBadParameter;
+  }
+
+  memset(&rect_type_in, 0, sizeof(rect_type_in));
+  memset(&rect_type_out, 0, sizeof(rect_type_out));
+  rect_type_in.nPortIndex = 0;
+  rect_type_out.nPortIndex = 0;
+
+  if ((dim->src_dim.width != dim->crop.width) ||
+    (dim->src_dim.height != dim->crop.height) ||
+    (dim->src_dim.width != dim->dst_dim.width) ||
+    (dim->src_dim.height != dim->dst_dim.height)) {
+    /* Scaler information */
+    rect_type_in.nWidth = CEILING2(dim->crop.width);
+    rect_type_in.nHeight = CEILING2(dim->crop.height);
+    rect_type_in.nLeft = dim->crop.left;
+    rect_type_in.nTop = dim->crop.top;
+
+    if (dim->dst_dim.width && dim->dst_dim.height) {
+      rect_type_out.nWidth = (OMX_U32)dim->dst_dim.width;
+      rect_type_out.nHeight = (OMX_U32)dim->dst_dim.height;
+    }
+  }
+
+  ret = OMX_SetConfig(p_session->omx_handle, OMX_IndexConfigCommonInputCrop,
+    &rect_type_in);
+  if (OMX_ErrorNone != ret) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+    return ret;
+  }
+
+  CDBG_HIGH("%s:%d] OMX_IndexConfigCommonInputCrop w = %d, h = %d, l = %d, t = %d,"
+    " port_idx = %d", __func__, __LINE__,
+    (int)rect_type_in.nWidth, (int)rect_type_in.nHeight,
+    (int)rect_type_in.nLeft, (int)rect_type_in.nTop,
+    (int)rect_type_in.nPortIndex);
+
+  ret = OMX_SetConfig(p_session->omx_handle, OMX_IndexConfigCommonOutputCrop,
+    &rect_type_out);
+  if (OMX_ErrorNone != ret) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+    return ret;
+  }
+  CDBG("%s:%d] OMX_IndexConfigCommonOutputCrop w = %d, h = %d,"
+    " port_idx = %d", __func__, __LINE__,
+    (int)rect_type_out.nWidth, (int)rect_type_out.nHeight,
+    (int)rect_type_out.nPortIndex);
+
+  return ret;
+}
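+
+/*
+ * Worked example (illustrative only): for a 4208x3120 source with a caller-
+ * supplied centered crop of 4000x3000 at (104, 60), scaled to a 2000x1500
+ * destination, the input crop is programmed as CEILING2(4000) x CEILING2(3000)
+ * at (104, 60) and the output crop as 2000x1500. When the crop and destination
+ * both match the source dimensions, the rectangles are left zeroed.
+ */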
+
+/** mm_jpeg_session_config_main:
+ *
+ *  Arguments:
+ *    @p_session: job session
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *       Configure main image
+ *
+ **/
+OMX_ERRORTYPE mm_jpeg_session_config_main(mm_jpeg_job_session_t *p_session)
+{
+  OMX_ERRORTYPE rc = OMX_ErrorNone;
+
+  /* config port */
+  CDBG_HIGH("%s:%d] config port", __func__, __LINE__);
+  rc = mm_jpeg_session_config_ports(p_session);
+  if (OMX_ErrorNone != rc) {
+    CDBG_ERROR("%s: config port failed", __func__);
+    return rc;
+  }
+
+  /* config buffer offset */
+  CDBG("%s:%d] config main buf offset", __func__, __LINE__);
+  rc = mm_jpeg_session_config_main_buffer_offset(p_session);
+  if (OMX_ErrorNone != rc) {
+    CDBG_ERROR("%s: config buffer offset failed", __func__);
+    return rc;
+  }
+
+  /* set the encoding mode */
+  rc = mm_jpeg_encoding_mode(p_session);
+  if (OMX_ErrorNone != rc) {
+    CDBG_ERROR("%s: config encoding mode failed", __func__);
+    return rc;
+  }
+
+  /* set the metadata encrypt key */
+  rc = mm_jpeg_meta_enc_key(p_session);
+  if (OMX_ErrorNone != rc) {
+    CDBG_ERROR("%s: config session failed", __func__);
+    return rc;
+  }
+
+  /* set the mem ops */
+  rc = mm_jpeg_mem_ops(p_session);
+  if (OMX_ErrorNone != rc) {
+    CDBG_ERROR("%s: config mem ops failed", __func__);
+    return rc;
+  }
+  /* set the jpeg speed mode */
+  rc = mm_jpeg_speed_mode(p_session);
+  if (OMX_ErrorNone != rc) {
+    CDBG_ERROR("%s: config speed mode failed", __func__);
+    return rc;
+  }
+
+  return rc;
+}
+
+/** mm_jpeg_session_config_common:
+ *
+ *  Arguments:
+ *    @p_session: job session
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *       Configure common parameters
+ *
+ **/
+OMX_ERRORTYPE mm_jpeg_session_config_common(mm_jpeg_job_session_t *p_session)
+{
+  OMX_ERRORTYPE rc = OMX_ErrorNone;
+  OMX_INDEXTYPE exif_idx;
+  OMX_CONFIG_ROTATIONTYPE rotate;
+  mm_jpeg_encode_job_t *p_jobparams = &p_session->encode_job;
+  QOMX_EXIF_INFO exif_info;
+
+  /* set rotation */
+  memset(&rotate, 0, sizeof(rotate));
+  rotate.nPortIndex = 1;
+  rotate.nRotation = (OMX_S32)p_jobparams->rotation;
+  rc = OMX_SetConfig(p_session->omx_handle, OMX_IndexConfigCommonRotate,
+    &rotate);
+  if (OMX_ErrorNone != rc) {
+      CDBG_ERROR("%s:%d] Error %d", __func__, __LINE__, rc);
+      return rc;
+  }
+  CDBG("%s:%d] Set rotation to %d at port_idx = %d", __func__, __LINE__,
+    (int)p_jobparams->rotation, (int)rotate.nPortIndex);
+
+  /* Set Exif data*/
+  memset(&p_session->exif_info_local[0], 0, sizeof(p_session->exif_info_local));
+  rc = OMX_GetExtensionIndex(p_session->omx_handle, QOMX_IMAGE_EXT_EXIF_NAME,
+    &exif_idx);
+  if (OMX_ErrorNone != rc) {
+    CDBG_ERROR("%s:%d] Error %d", __func__, __LINE__, rc);
+    return rc;
+  }
+
+  CDBG("%s:%d] Num of exif entries passed from HAL: %d", __func__, __LINE__,
+      (int)p_jobparams->exif_info.numOfEntries);
+  if (p_jobparams->exif_info.numOfEntries > 0) {
+    rc = OMX_SetConfig(p_session->omx_handle, exif_idx,
+        &p_jobparams->exif_info);
+    if (OMX_ErrorNone != rc) {
+      CDBG_ERROR("%s:%d] Error %d", __func__, __LINE__, rc);
+      return rc;
+    }
+  }
+  /* parse additional exif data from the metadata */
+  exif_info.numOfEntries = 0;
+  exif_info.exif_data = &p_session->exif_info_local[0];
+  process_meta_data(p_jobparams->p_metadata, &exif_info,
+    &p_jobparams->cam_exif_params, p_jobparams->hal_version);
+  /* After Parse metadata */
+  p_session->exif_count_local = (int)exif_info.numOfEntries;
+
+  if (exif_info.numOfEntries > 0) {
+    /* set exif tags */
+    CDBG("%s:%d] exif tags from metadata count %d", __func__, __LINE__,
+      (int)exif_info.numOfEntries);
+
+    rc = OMX_SetConfig(p_session->omx_handle, exif_idx,
+      &exif_info);
+    if (OMX_ErrorNone != rc) {
+      CDBG_ERROR("%s:%d] Error %d", __func__, __LINE__, rc);
+      return rc;
+    }
+  }
+
+  return rc;
+}
+
+
+
+
+/** mm_jpeg_session_abort:
+ *
+ *  Arguments:
+ *    @p_session: jpeg session
+ *
+ *  Return:
+ *       OMX_BOOL
+ *
+ *  Description:
+ *       Abort ongoing job
+ *
+ **/
+OMX_BOOL mm_jpeg_session_abort(mm_jpeg_job_session_t *p_session)
+{
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  int rc = 0;
+
+  CDBG("%s:%d] E", __func__, __LINE__);
+  pthread_mutex_lock(&p_session->lock);
+  if (MM_JPEG_ABORT_NONE != p_session->abort_state) {
+    pthread_mutex_unlock(&p_session->lock);
+    CDBG_HIGH("%s:%d] **** ALREADY ABORTED", __func__, __LINE__);
+    return 0;
+  }
+  p_session->abort_state = MM_JPEG_ABORT_INIT;
+  if (OMX_TRUE == p_session->encoding) {
+    p_session->state_change_pending = OMX_TRUE;
+
+    CDBG_HIGH("%s:%d] **** ABORTING", __func__, __LINE__);
+    pthread_mutex_unlock(&p_session->lock);
+
+    ret = OMX_SendCommand(p_session->omx_handle, OMX_CommandStateSet,
+    OMX_StateIdle, NULL);
+
+    if (ret != OMX_ErrorNone) {
+      CDBG_ERROR("%s:%d] OMX_SendCommand returned error %d", __func__, __LINE__, ret);
+      return 1;
+    }
+    rc = mm_jpegenc_destroy_job(p_session);
+    if (rc != 0) {
+      CDBG_ERROR("%s:%d] Destroy job returned error %d", __func__, __LINE__, rc);
+    }
+
+    pthread_mutex_lock(&p_session->lock);
+    if (MM_JPEG_ABORT_INIT == p_session->abort_state) {
+      CDBG("%s:%d] before wait", __func__, __LINE__);
+      pthread_cond_wait(&p_session->cond, &p_session->lock);
+    }
+    CDBG("%s:%d] after wait", __func__, __LINE__);
+  }
+  p_session->abort_state = MM_JPEG_ABORT_DONE;
+  pthread_mutex_unlock(&p_session->lock);
+
+
+  // Abort next session
+  if (p_session->next_session) {
+    mm_jpeg_session_abort(p_session->next_session);
+  }
+
+  CDBG("%s:%d] X", __func__, __LINE__);
+  return 0;
+}
+
+
+/** mm_jpeg_configure_job_params:
+ *
+ *  Arguments:
+ *    @p_session: encode session
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *       Configure the job specific params
+ *
+ **/
+static OMX_ERRORTYPE mm_jpeg_configure_job_params(
+  mm_jpeg_job_session_t *p_session)
+{
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  OMX_IMAGE_PARAM_QFACTORTYPE q_factor;
+  QOMX_WORK_BUFFER work_buffer;
+  OMX_INDEXTYPE work_buffer_index;
+  mm_jpeg_encode_params_t *p_params = &p_session->params;
+  mm_jpeg_encode_job_t *p_jobparams = &p_session->encode_job;
+  int i;
+
+  /* common config */
+  ret = mm_jpeg_session_config_common(p_session);
+  if (OMX_ErrorNone != ret) {
+    CDBG_ERROR("%s:%d] config common failed", __func__, __LINE__);
+
+  }
+
+  /* config Main Image crop */
+  CDBG("%s:%d] config main crop", __func__, __LINE__);
+  ret = mm_jpeg_session_config_main_crop(p_session);
+  if (OMX_ErrorNone != ret) {
+    CDBG_ERROR("%s: config crop failed", __func__);
+    return ret;
+  }
+
+  /* set quality */
+  memset(&q_factor, 0, sizeof(q_factor));
+  q_factor.nPortIndex = 0;
+  q_factor.nQFactor = p_params->quality;
+  ret = OMX_SetConfig(p_session->omx_handle, OMX_IndexParamQFactor, &q_factor);
+  CDBG("%s:%d] config QFactor: %d", __func__, __LINE__, (int)q_factor.nQFactor);
+  if (OMX_ErrorNone != ret) {
+    CDBG_ERROR("%s:%d] Error setting Q factor %d", __func__, __LINE__, ret);
+    return ret;
+  }
+
+  /* config thumbnail */
+  ret = mm_jpeg_session_config_thumbnail(p_session);
+  if (OMX_ErrorNone != ret) {
+    CDBG_ERROR("%s:%d] config thumbnail img failed", __func__, __LINE__);
+    return ret;
+  }
+
+  //Pass the ION buffer to be used as o/p for HW
+  memset(&work_buffer, 0x0, sizeof(QOMX_WORK_BUFFER));
+  ret = OMX_GetExtensionIndex(p_session->omx_handle,
+    QOMX_IMAGE_EXT_WORK_BUFFER_NAME,
+    &work_buffer_index);
+  if (ret) {
+    CDBG_ERROR("%s:%d] Error getting work buffer index %d",
+      __func__, __LINE__, ret);
+    return ret;
+  }
+  work_buffer.fd = p_session->work_buffer.p_pmem_fd;
+  work_buffer.vaddr = p_session->work_buffer.addr;
+  work_buffer.length = (uint32_t)p_session->work_buffer.size;
+  CDBG_ERROR("%s:%d] Work buffer info %d %p WorkBufSize: %d invalidate", __func__, __LINE__,
+    work_buffer.fd, work_buffer.vaddr, work_buffer.length);
+
+  buffer_invalidate(&p_session->work_buffer);
+
+  ret = OMX_SetConfig(p_session->omx_handle, work_buffer_index,
+    &work_buffer);
+  if (ret) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+    return ret;
+  }
+
+  /* set metadata */
+  ret = mm_jpeg_metadata(p_session);
+  if (OMX_ErrorNone != ret) {
+    CDBG_ERROR("%s: config makernote data failed", __func__);
+    return ret;
+  }
+
+  /* set QTable */
+  for (i = 0; i < QTABLE_MAX; i++) {
+    if (p_jobparams->qtable_set[i]) {
+      ret = OMX_SetConfig(p_session->omx_handle,
+        OMX_IndexParamQuantizationTable, &p_jobparams->qtable[i]);
+      if (OMX_ErrorNone != ret) {
+        CDBG_ERROR("%s:%d] set QTable Error", __func__, __LINE__);
+        return ret;
+      }
+    }
+  }
+
+  return ret;
+}
+
+/** mm_jpeg_session_configure:
+ *
+ *  Arguments:
+ *    @p_session: encode session
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *       Configure the session
+ *
+ **/
+static OMX_ERRORTYPE mm_jpeg_session_configure(mm_jpeg_job_session_t *p_session)
+{
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+
+  CDBG("%s:%d] E ", __func__, __LINE__);
+
+  MM_JPEG_CHK_ABORT(p_session, ret, error);
+
+  /* config main img */
+  ret = mm_jpeg_session_config_main(p_session);
+  if (OMX_ErrorNone != ret) {
+    CDBG_ERROR("%s:%d] config main img failed", __func__, __LINE__);
+    goto error;
+  }
+  ret = mm_jpeg_session_change_state(p_session, OMX_StateIdle,
+    mm_jpeg_session_send_buffers);
+  if (ret) {
+    CDBG_ERROR("%s:%d] change state to idle failed %d",
+      __func__, __LINE__, ret);
+    goto error;
+  }
+
+  ret = mm_jpeg_session_change_state(p_session, OMX_StateExecuting,
+    NULL);
+  if (ret) {
+    CDBG_ERROR("%s:%d] change state to executing failed %d",
+      __func__, __LINE__, ret);
+    goto error;
+  }
+
+error:
+  CDBG("%s:%d] X ret %d", __func__, __LINE__, ret);
+  return ret;
+}
+
+
+
+
+
+
+/** mm_jpeg_session_encode:
+ *
+ *  Arguments:
+ *    @p_session: encode session
+ *
+ *  Return:
+ *       OMX_ERRORTYPE
+ *
+ *  Description:
+ *       Start the encoding
+ *
+ **/
+static OMX_ERRORTYPE mm_jpeg_session_encode(mm_jpeg_job_session_t *p_session)
+{
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  mm_jpeg_encode_job_t *p_jobparams = &p_session->encode_job;
+
+  pthread_mutex_lock(&p_session->lock);
+  p_session->abort_state = MM_JPEG_ABORT_NONE;
+  p_session->encoding = OMX_FALSE;
+  pthread_mutex_unlock(&p_session->lock);
+
+  if (p_session->thumb_from_main) {
+    if (0 > p_jobparams->src_index) {
+      CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+      ret = OMX_ErrorUnsupportedIndex;
+      goto error;
+    }
+    p_jobparams->thumb_index = (uint32_t)p_jobparams->src_index;
+    p_jobparams->thumb_dim.crop = p_jobparams->main_dim.crop;
+  }
+
+  if (OMX_FALSE == p_session->config) {
+    ret = mm_jpeg_session_configure(p_session);
+    if (ret) {
+      CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+      goto error;
+    }
+    p_session->config = OMX_TRUE;
+  }
+
+  ret = mm_jpeg_configure_job_params(p_session);
+  if (ret) {
+      CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+      goto error;
+  }
+  pthread_mutex_lock(&p_session->lock);
+  p_session->encoding = OMX_TRUE;
+  pthread_mutex_unlock(&p_session->lock);
+
+  MM_JPEG_CHK_ABORT(p_session, ret, error);
+
+#ifdef MM_JPEG_DUMP_INPUT
+  char filename[256];
+  snprintf(filename, sizeof(filename),
+      QCAMERA_DUMP_FRM_LOCATION"jpeg/mm_jpeg_int%d.yuv", p_session->ebd_count);
+  DUMP_TO_FILE(filename,
+    p_session->p_in_omx_buf[p_jobparams->src_index]->pBuffer,
+    (size_t)p_session->p_in_omx_buf[p_jobparams->src_index]->nAllocLen);
+#endif
+
+  ret = OMX_EmptyThisBuffer(p_session->omx_handle,
+    p_session->p_in_omx_buf[p_jobparams->src_index]);
+  if (ret) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+    goto error;
+  }
+
+  if (p_session->params.encode_thumbnail) {
+#ifdef MM_JPEG_DUMP_INPUT
+  char thumb_filename[FILENAME_MAX];
+  snprintf(thumb_filename, sizeof(thumb_filename),
+    QCAMERA_DUMP_FRM_LOCATION"jpeg/mm_jpeg_int_t%d.yuv", p_session->ebd_count);
+  DUMP_TO_FILE(thumb_filename,
+    p_session->p_in_omx_thumb_buf[p_jobparams->thumb_index]->pBuffer,
+    (size_t)p_session->p_in_omx_thumb_buf[p_jobparams->thumb_index]->nAllocLen);
+#endif
+    ret = OMX_EmptyThisBuffer(p_session->omx_handle,
+        p_session->p_in_omx_thumb_buf[p_jobparams->thumb_index]);
+    if (ret) {
+      CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+      goto error;
+    }
+  }
+
+  ret = OMX_FillThisBuffer(p_session->omx_handle,
+    p_session->p_out_omx_buf[p_jobparams->dst_index]);
+  if (ret) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+    goto error;
+  }
+
+  MM_JPEG_CHK_ABORT(p_session, ret, error);
+
+error:
+
+  CDBG("%s:%d] X ", __func__, __LINE__);
+  return ret;
+}
+
+/** mm_jpeg_process_encoding_job:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *    @job_node: job node
+ *
+ *  Return:
+ *       0 for success -1 otherwise
+ *
+ *  Description:
+ *       Start the encoding job
+ *
+ **/
+int32_t mm_jpeg_process_encoding_job(mm_jpeg_obj *my_obj, mm_jpeg_job_q_node_t* job_node)
+{
+  mm_jpeg_q_data_t qdata;
+  int32_t rc = 0;
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  mm_jpeg_job_session_t *p_session = NULL;
+  uint32_t buf_idx;
+
+  /* check if valid session */
+  p_session = mm_jpeg_get_session(my_obj, job_node->enc_info.job_id);
+  if (NULL == p_session) {
+    CDBG_ERROR("%s:%d] invalid job id %x", __func__, __LINE__,
+        job_node->enc_info.job_id);
+    return -1;
+  }
+
+  CDBG("%s:%d] before dequeue session %d",
+                __func__, __LINE__, ret);
+
+  /* dequeue available omx handle */
+  qdata = mm_jpeg_queue_deq(p_session->session_handle_q);
+  p_session = qdata.p;
+
+  if (NULL == p_session) {
+    CDBG_HIGH("%s:%d] No available sessions %d",
+          __func__, __LINE__, ret);
+    /* No available handles */
+    qdata.p = job_node;
+    mm_jpeg_queue_enq_head(&my_obj->job_mgr.job_queue, qdata);
+
+    CDBG_HIGH("%s:%d]end enqueue %d",
+              __func__, __LINE__, ret);
+    return rc;
+
+  }
+
+  p_session->auto_out_buf = OMX_FALSE;
+  if (job_node->enc_info.encode_job.dst_index < 0) {
+    /* dequeue available output buffer idx */
+    qdata = mm_jpeg_queue_deq(p_session->out_buf_q);
+    buf_idx = qdata.u32;
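+    /* The out_buf_q entries are assumed to hold (buffer index + 1) so that a
+       dequeued value of 0 can signal an empty queue; hence the decrement
+       below. */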
+
+    if (0U == buf_idx) {
+      CDBG_ERROR("%s:%d] No available output buffers %d",
+          __func__, __LINE__, ret);
+      return OMX_ErrorUndefined;
+    }
+
+    buf_idx--;
+
+    job_node->enc_info.encode_job.dst_index = (int32_t)buf_idx;
+    p_session->auto_out_buf = OMX_TRUE;
+  }
+
+  /* sent encode cmd to OMX, queue job into ongoing queue */
+  qdata.p = job_node;
+  rc = mm_jpeg_queue_enq(&my_obj->ongoing_job_q, qdata);
+  if (rc) {
+    CDBG_ERROR("%s:%d] jpeg enqueue failed %d",
+      __func__, __LINE__, ret);
+    goto error;
+  }
+
+  p_session->encode_job = job_node->enc_info.encode_job;
+  p_session->jobId = job_node->enc_info.job_id;
+  ret = mm_jpeg_session_encode(p_session);
+  if (ret) {
+    CDBG_ERROR("%s:%d] encode session failed", __func__, __LINE__);
+    goto error;
+  }
+
+  CDBG_HIGH("%s:%d] Success X ", __func__, __LINE__);
+  return rc;
+
+error:
+
+  if ((OMX_ErrorNone != ret) &&
+    (NULL != p_session->params.jpeg_cb)) {
+    p_session->job_status = JPEG_JOB_STATUS_ERROR;
+    CDBG_ERROR("%s:%d] send jpeg error callback %d", __func__, __LINE__,
+      p_session->job_status);
+    p_session->params.jpeg_cb(p_session->job_status,
+      p_session->client_hdl,
+      p_session->jobId,
+      NULL,
+      p_session->params.userdata);
+  }
+
+  /*remove the job*/
+  mm_jpegenc_job_done(p_session);
+  CDBG("%s:%d] Error X ", __func__, __LINE__);
+
+  return rc;
+}
+
+
+
+/** mm_jpeg_jobmgr_thread:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       job manager thread main function
+ *
+ **/
+static void *mm_jpeg_jobmgr_thread(void *data)
+{
+  mm_jpeg_q_data_t qdata;
+  int rc = 0;
+  int running = 1;
+  uint32_t num_ongoing_jobs = 0;
+  mm_jpeg_obj *my_obj = (mm_jpeg_obj*)data;
+  mm_jpeg_job_cmd_thread_t *cmd_thread = &my_obj->job_mgr;
+  mm_jpeg_job_q_node_t* node = NULL;
+  prctl(PR_SET_NAME, (unsigned long)"mm_jpeg_thread", 0, 0, 0);
+
+  do {
+    do {
+      rc = cam_sem_wait(&cmd_thread->job_sem);
+      if (rc != 0 && errno != EINVAL) {
+        CDBG_ERROR("%s: cam_sem_wait error (%s)",
+          __func__, strerror(errno));
+        return NULL;
+      }
+    } while (rc != 0);
+
+    /* check ongoing q size */
+    num_ongoing_jobs = mm_jpeg_queue_get_size(&my_obj->ongoing_job_q);
+
+    CDBG("%s:%d] ongoing job  %d %d", __func__,
+      __LINE__, num_ongoing_jobs, MM_JPEG_CONCURRENT_SESSIONS_COUNT);
+    if (num_ongoing_jobs >= MM_JPEG_CONCURRENT_SESSIONS_COUNT) {
+      CDBG_ERROR("%s:%d] ongoing job already reach max %d", __func__,
+        __LINE__, num_ongoing_jobs);
+      continue;
+    }
+
+    pthread_mutex_lock(&my_obj->job_lock);
+    /* can go ahead with new work */
+    qdata = mm_jpeg_queue_deq(&cmd_thread->job_queue);
+    node = (mm_jpeg_job_q_node_t*)qdata.p;
+    if (node != NULL) {
+      switch (node->type) {
+      case MM_JPEG_CMD_TYPE_JOB:
+        rc = mm_jpeg_process_encoding_job(my_obj, node);
+        break;
+      case MM_JPEG_CMD_TYPE_DECODE_JOB:
+        rc = mm_jpegdec_process_decoding_job(my_obj, node);
+        break;
+      case MM_JPEG_CMD_TYPE_EXIT:
+      default:
+        /* free node */
+        free(node);
+        /* set running flag to false */
+        running = 0;
+        break;
+      }
+    }
+    pthread_mutex_unlock(&my_obj->job_lock);
+
+  } while (running);
+  return NULL;
+}
+
+/** mm_jpeg_jobmgr_thread_launch:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       launches the job manager thread
+ *
+ **/
+int32_t mm_jpeg_jobmgr_thread_launch(mm_jpeg_obj *my_obj)
+{
+  int32_t rc = 0;
+  mm_jpeg_job_cmd_thread_t *job_mgr = &my_obj->job_mgr;
+
+  cam_sem_init(&job_mgr->job_sem, 0);
+  mm_jpeg_queue_init(&job_mgr->job_queue);
+
+  /* launch the thread */
+  pthread_create(&job_mgr->pid,
+    NULL,
+    mm_jpeg_jobmgr_thread,
+    (void *)my_obj);
+  pthread_setname_np(job_mgr->pid, "CAM_jpeg_jobmgr");
+  return rc;
+}
+
+/** mm_jpeg_jobmgr_thread_release:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       Releases the job manager thread
+ *
+ **/
+int32_t mm_jpeg_jobmgr_thread_release(mm_jpeg_obj * my_obj)
+{
+  mm_jpeg_q_data_t qdata;
+  int32_t rc = 0;
+  mm_jpeg_job_cmd_thread_t * cmd_thread = &my_obj->job_mgr;
+  mm_jpeg_job_q_node_t* node =
+    (mm_jpeg_job_q_node_t *)malloc(sizeof(mm_jpeg_job_q_node_t));
+  if (NULL == node) {
+    CDBG_ERROR("%s: No memory for mm_jpeg_job_q_node_t", __func__);
+    return -1;
+  }
+
+  memset(node, 0, sizeof(mm_jpeg_job_q_node_t));
+  node->type = MM_JPEG_CMD_TYPE_EXIT;
+
+  qdata.p = node;
+  mm_jpeg_queue_enq(&cmd_thread->job_queue, qdata);
+  cam_sem_post(&cmd_thread->job_sem);
+
+  /* wait until cmd thread exits */
+  if (pthread_join(cmd_thread->pid, NULL) != 0) {
+    CDBG("%s: pthread dead already", __func__);
+  }
+  mm_jpeg_queue_deinit(&cmd_thread->job_queue);
+
+  cam_sem_destroy(&cmd_thread->job_sem);
+  memset(cmd_thread, 0, sizeof(mm_jpeg_job_cmd_thread_t));
+  return rc;
+}
+
+/** mm_jpeg_init:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       Initializes the jpeg client
+ *
+ **/
+int32_t mm_jpeg_init(mm_jpeg_obj *my_obj)
+{
+  int32_t rc = 0;
+  uint32_t work_buf_size;
+  unsigned int i = 0;
+  unsigned int initial_workbufs_cnt = 1;
+
+  /* init locks */
+  pthread_mutex_init(&my_obj->job_lock, NULL);
+
+  /* init ongoing job queue */
+  rc = mm_jpeg_queue_init(&my_obj->ongoing_job_q);
+  if (0 != rc) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+    pthread_mutex_destroy(&my_obj->job_lock);
+    return -1;
+  }
+
+
+  /* init job semaphore and launch jobmgr thread */
+  CDBG("%s:%d] Launch jobmgr thread rc %d", __func__, __LINE__, rc);
+  rc = mm_jpeg_jobmgr_thread_launch(my_obj);
+  if (0 != rc) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+    mm_jpeg_queue_deinit(&my_obj->ongoing_job_q);
+    pthread_mutex_destroy(&my_obj->job_lock);
+    return -1;
+  }
+
+  /* set work buf size from max picture size */
+  if (my_obj->max_pic_w <= 0 || my_obj->max_pic_h <= 0) {
+    CDBG_ERROR("%s:%d] Width and height are not valid "
+      "dimensions, cannot calc work buf size",__func__, __LINE__);
+    mm_jpeg_jobmgr_thread_release(my_obj);
+    mm_jpeg_queue_deinit(&my_obj->ongoing_job_q);
+    pthread_mutex_destroy(&my_obj->job_lock);
+    return -1;
+  }
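+  /* The work buffer is sized for one YUV 4:2:0 frame: width * height for
+   * luma plus half that again for chroma, with both dimensions rounded up
+   * to a multiple of 64. For example, a 4000x3000 maximum picture size
+   * gives 4032 * 3008 * 3/2, roughly 18 MB per buffer. */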
+  work_buf_size = CEILING64((uint32_t)my_obj->max_pic_w) *
+    CEILING64((uint32_t)my_obj->max_pic_h) * 3U / 2U;
+  for (i = 0; i < initial_workbufs_cnt; i++) {
+    my_obj->ionBuffer[i].size = CEILING32(work_buf_size);
+    CDBG_HIGH("Max picture size %d x %d, WorkBufSize = %zu",
+        my_obj->max_pic_w, my_obj->max_pic_h, my_obj->ionBuffer[i].size);
+
+    my_obj->ionBuffer[i].addr = (uint8_t *)buffer_allocate(&my_obj->ionBuffer[i], 1);
+    if (NULL == my_obj->ionBuffer[i].addr) {
+      while (i--) {
+        buffer_deallocate(&my_obj->ionBuffer[i]);
+      }
+      mm_jpeg_jobmgr_thread_release(my_obj);
+      mm_jpeg_queue_deinit(&my_obj->ongoing_job_q);
+      pthread_mutex_destroy(&my_obj->job_lock);
+      CDBG_ERROR("%s:%d] Ion allocation failed",__func__, __LINE__);
+      return -1;
+    }
+  }
+
+  my_obj->work_buf_cnt = i;
+
+  /* load OMX */
+  if (OMX_ErrorNone != OMX_Init()) {
+    /* roll back in error case */
+    CDBG_ERROR("%s:%d] OMX_Init failed (%d)", __func__, __LINE__, rc);
+    for (i = 0; i < initial_workbufs_cnt; i++) {
+      buffer_deallocate(&my_obj->ionBuffer[i]);
+    }
+    mm_jpeg_jobmgr_thread_release(my_obj);
+    mm_jpeg_queue_deinit(&my_obj->ongoing_job_q);
+    pthread_mutex_destroy(&my_obj->job_lock);
+    return -1;
+  }
+
+#ifdef LOAD_ADSP_RPC_LIB
+  my_obj->adsprpc_lib_handle = dlopen("libadsprpc.so", RTLD_NOW);
+  if (NULL == my_obj->adsprpc_lib_handle) {
+    CDBG_ERROR("%s:%d] Cannot load the library", __func__, __LINE__);
+    /* not returning an error here because even if this load fails
+     * we can still fall back to SW JPEG encoding */
+  }
+#endif
+
+  return rc;
+}
+
+/** mm_jpeg_deinit:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       Deinits the jpeg client
+ *
+ **/
+int32_t mm_jpeg_deinit(mm_jpeg_obj *my_obj)
+{
+  int32_t rc = 0;
+  uint32_t i = 0;
+
+  /* release jobmgr thread */
+  rc = mm_jpeg_jobmgr_thread_release(my_obj);
+  if (0 != rc) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+  }
+
+  /* unload OMX engine */
+  OMX_Deinit();
+
+  /* deinit ongoing job and cb queue */
+  rc = mm_jpeg_queue_deinit(&my_obj->ongoing_job_q);
+  if (0 != rc) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+  }
+
+  for (i = 0; i < my_obj->work_buf_cnt; i++) {
+    /*Release the ION buffer*/
+    rc = buffer_deallocate(&my_obj->ionBuffer[i]);
+    if (0 != rc) {
+      CDBG_ERROR("%s:%d] Error releasing ION buffer", __func__, __LINE__);
+    }
+  }
+
+  /* destroy locks */
+  pthread_mutex_destroy(&my_obj->job_lock);
+
+  return rc;
+}
+
+/** mm_jpeg_new_client:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       Create new jpeg client
+ *
+ **/
+uint32_t mm_jpeg_new_client(mm_jpeg_obj *my_obj)
+{
+  uint32_t client_hdl = 0;
+  uint8_t idx;
+  int i = 0;
+
+  if (my_obj->num_clients >= MAX_JPEG_CLIENT_NUM) {
+    CDBG_ERROR("%s: num of clients reached limit", __func__);
+    return client_hdl;
+  }
+
+  for (idx = 0; idx < MAX_JPEG_CLIENT_NUM; idx++) {
+    if (0 == my_obj->clnt_mgr[idx].is_used) {
+      break;
+    }
+  }
+
+  if (idx < MAX_JPEG_CLIENT_NUM) {
+    /* client session avail */
+    /* generate client handler by index */
+    client_hdl = mm_jpeg_util_generate_handler(idx);
+
+    /* update client session */
+    my_obj->clnt_mgr[idx].is_used = 1;
+    my_obj->clnt_mgr[idx].client_handle = client_hdl;
+
+    pthread_mutex_init(&my_obj->clnt_mgr[idx].lock, NULL);
+    for (i = 0; i < MM_JPEG_MAX_SESSION; i++) {
+      memset(&my_obj->clnt_mgr[idx].session[i], 0x0, sizeof(mm_jpeg_job_session_t));
+    }
+
+    /* increase client count */
+    my_obj->num_clients++;
+  }
+
+  return client_hdl;
+}
+
+/** mm_jpeg_start_job:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *    @job: pointer to encode job
+ *    @job_id: job id
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       Start the encoding job
+ *
+ **/
+int32_t mm_jpeg_start_job(mm_jpeg_obj *my_obj,
+  mm_jpeg_job_t *job,
+  uint32_t *job_id)
+{
+  mm_jpeg_q_data_t qdata;
+  int32_t rc = -1;
+  uint8_t session_idx = 0;
+  uint8_t client_idx = 0;
+  mm_jpeg_job_q_node_t* node = NULL;
+  mm_jpeg_job_session_t *p_session = NULL;
+  mm_jpeg_encode_job_t *p_jobparams  = &job->encode_job;
+
+  *job_id = 0;
+
+  /* check if valid session */
+  session_idx = GET_SESSION_IDX(p_jobparams->session_id);
+  client_idx = GET_CLIENT_IDX(p_jobparams->session_id);
+  CDBG_HIGH("%s:%d] session_idx %d client idx %d", __func__, __LINE__,
+    session_idx, client_idx);
+
+  if ((session_idx >= MM_JPEG_MAX_SESSION) ||
+    (client_idx >= MAX_JPEG_CLIENT_NUM)) {
+    CDBG_ERROR("%s:%d] invalid session id %x", __func__, __LINE__,
+      job->encode_job.session_id);
+    return rc;
+  }
+
+  p_session = &my_obj->clnt_mgr[client_idx].session[session_idx];
+  if (OMX_FALSE == p_session->active) {
+    CDBG_ERROR("%s:%d] session not active %x", __func__, __LINE__,
+      job->encode_job.session_id);
+    return rc;
+  }
+
+  if ((p_jobparams->src_index >= (int32_t)p_session->params.num_src_bufs) ||
+    (p_jobparams->dst_index >= (int32_t)p_session->params.num_dst_bufs)) {
+    CDBG_ERROR("%s:%d] invalid buffer indices", __func__, __LINE__);
+    return rc;
+  }
+
+  /* enqueue new job into todo job queue */
+  node = (mm_jpeg_job_q_node_t *)malloc(sizeof(mm_jpeg_job_q_node_t));
+  if (NULL == node) {
+    CDBG_ERROR("%s: No memory for mm_jpeg_job_q_node_t", __func__);
+    return -1;
+  }
+
+  ATRACE_INT("Camera:JPEG",
+      (int32_t)((uint32_t)session_idx<<16 | ++p_session->job_index));
+
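+  /* The job id keeps the session id layout in its low bits and packs a
+   * rolling per-session history counter into bits 16 and up, so a job can
+   * be traced back to its session and client. */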
+  *job_id = job->encode_job.session_id |
+    (((uint32_t)p_session->job_hist++ % JOB_HIST_MAX) << 16);
+
+  memset(node, 0, sizeof(mm_jpeg_job_q_node_t));
+  node->enc_info.encode_job = job->encode_job;
+  if (p_session->thumb_from_main) {
+    node->enc_info.encode_job.thumb_dim.src_dim =
+      node->enc_info.encode_job.main_dim.src_dim;
+    node->enc_info.encode_job.thumb_dim.crop =
+      node->enc_info.encode_job.main_dim.crop;
+  }
+  node->enc_info.job_id = *job_id;
+  node->enc_info.client_handle = p_session->client_hdl;
+  node->type = MM_JPEG_CMD_TYPE_JOB;
+
+
+
+  qdata.p = node;
+  rc = mm_jpeg_queue_enq(&my_obj->job_mgr.job_queue, qdata);
+  if (0 == rc) {
+      cam_sem_post(&my_obj->job_mgr.job_sem);
+  }
+
+  CDBG_HIGH("%s:%d] job_id %d X", __func__, __LINE__, *job_id);
+
+  return rc;
+}
+
+
+
+/** mm_jpeg_abort_job:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *    @jobId: job id
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       Abort the encoding job
+ *
+ **/
+int32_t mm_jpeg_abort_job(mm_jpeg_obj *my_obj,
+  uint32_t jobId)
+{
+  int32_t rc = -1;
+  mm_jpeg_job_q_node_t *node = NULL;
+  mm_jpeg_job_session_t *p_session = NULL;
+
+  CDBG("%s:%d] ", __func__, __LINE__);
+  pthread_mutex_lock(&my_obj->job_lock);
+
+  /* abort job if in todo queue */
+  node = mm_jpeg_queue_remove_job_by_job_id(&my_obj->job_mgr.job_queue, jobId);
+  if (NULL != node) {
+    free(node);
+    rc = 0;
+    goto abort_done;
+  }
+
+  /* abort job if in ongoing queue */
+  node = mm_jpeg_queue_remove_job_by_job_id(&my_obj->ongoing_job_q, jobId);
+  if (NULL != node) {
+    /* find job that is OMX ongoing, ask OMX to abort the job */
+    p_session = mm_jpeg_get_session(my_obj, node->enc_info.job_id);
+    if (p_session) {
+      mm_jpeg_session_abort(p_session);
+    } else {
+      CDBG_ERROR("%s:%d] Invalid job id 0x%x", __func__, __LINE__,
+        node->enc_info.job_id);
+    }
+    free(node);
+    rc = 0;
+    goto abort_done;
+  }
+
+abort_done:
+  pthread_mutex_unlock(&my_obj->job_lock);
+
+  return rc;
+}
+
+
+#ifdef MM_JPEG_READ_META_KEYFILE
+static int32_t mm_jpeg_read_meta_keyfile(mm_jpeg_job_session_t *p_session,
+    const char *filename)
+{
+  int rc = 0;
+  FILE *fp = NULL;
+  size_t file_size = 0;
+  fp = fopen(filename, "r");
+  if (!fp) {
+    CDBG_ERROR("%s:%d] Key not present", __func__, __LINE__);
+    return -1;
+  }
+  fseek(fp, 0, SEEK_END);
+  file_size = (size_t)ftell(fp);
+  fseek(fp, 0, SEEK_SET);
+
+  p_session->meta_enc_key = (uint8_t *) malloc((file_size + 1) * sizeof(uint8_t));
+
+  if (!p_session->meta_enc_key) {
+    CDBG_ERROR("%s:%d] no memory for meta enc key", __func__, __LINE__);
+    fclose(fp);
+    return -1;
+  }
+
+  fread(p_session->meta_enc_key, 1, file_size, fp);
+  fclose(fp);
+
+  p_session->meta_enc_keylen = file_size;
+
+  return rc;
+}
+#endif // MM_JPEG_READ_META_KEYFILE
+
+/** mm_jpeg_create_session:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *    @client_hdl: client handle
+ *    @p_params: pointer to encode params
+ *    @p_session_id: session id
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       Start the encoding session
+ *
+ **/
+int32_t mm_jpeg_create_session(mm_jpeg_obj *my_obj,
+  uint32_t client_hdl,
+  mm_jpeg_encode_params_t *p_params,
+  uint32_t* p_session_id)
+{
+  mm_jpeg_q_data_t qdata;
+  int32_t rc = 0;
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  uint8_t clnt_idx = 0;
+  int session_idx = -1;
+  mm_jpeg_job_session_t *p_session = NULL;
+  mm_jpeg_job_session_t * p_prev_session = NULL;
+  *p_session_id = 0;
+  uint32_t i = 0;
+  uint32_t num_omx_sessions;
+  uint32_t work_buf_size;
+  mm_jpeg_queue_t *p_session_handle_q, *p_out_buf_q;
+  uint32_t work_bufs_need;
+  char trace_tag[32];
+
+  /* validate the parameters */
+  if ((p_params->num_src_bufs > MM_JPEG_MAX_BUF)
+    || (p_params->num_dst_bufs > MM_JPEG_MAX_BUF)) {
+    CDBG_ERROR("%s:%d] invalid num buffers", __func__, __LINE__);
+    return -1;
+  }
+
+  /* check if valid client */
+  clnt_idx = mm_jpeg_util_get_index_by_handler(client_hdl);
+  if (clnt_idx >= MAX_JPEG_CLIENT_NUM) {
+    CDBG_ERROR("%s: invalid client with handler (%d)", __func__, client_hdl);
+    return -1;
+  }
+
+  num_omx_sessions = 1;
+  if (p_params->burst_mode) {
+    num_omx_sessions = MM_JPEG_CONCURRENT_SESSIONS_COUNT;
+  }
+  work_bufs_need = num_omx_sessions;
+  if (work_bufs_need > MM_JPEG_CONCURRENT_SESSIONS_COUNT) {
+    work_bufs_need = MM_JPEG_CONCURRENT_SESSIONS_COUNT;
+  }
+  CDBG_HIGH("%s:%d] >>>> Work bufs need %d", __func__, __LINE__, work_bufs_need);
+  work_buf_size = CEILING64((uint32_t)my_obj->max_pic_w) *
+      CEILING64((uint32_t)my_obj->max_pic_h) * 3 / 2;
+  for (i = my_obj->work_buf_cnt; i < work_bufs_need; i++) {
+     my_obj->ionBuffer[i].size = CEILING32(work_buf_size);
+     CDBG_HIGH("Max picture size %d x %d, WorkBufSize = %zu",
+         my_obj->max_pic_w, my_obj->max_pic_h, my_obj->ionBuffer[i].size);
+
+     my_obj->ionBuffer[i].addr = (uint8_t *)buffer_allocate(&my_obj->ionBuffer[i], 1);
+     if (NULL == my_obj->ionBuffer[i].addr) {
+       CDBG_ERROR("%s:%d] Ion allocation failed",__func__, __LINE__);
+       goto error1;
+     }
+     my_obj->work_buf_cnt++;
+  }
+
+
+  /* init omx handle queue */
+  p_session_handle_q = (mm_jpeg_queue_t *) malloc(sizeof(*p_session_handle_q));
+  if (NULL == p_session_handle_q) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+    goto error1;
+  }
+  rc = mm_jpeg_queue_init(p_session_handle_q);
+  if (0 != rc) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+    free(p_session_handle_q);
+    goto error1;
+  }
+
+  /* init output buf queue */
+  p_out_buf_q = (mm_jpeg_queue_t *) malloc(sizeof(*p_out_buf_q));
+  if (NULL == p_out_buf_q) {
+    CDBG_ERROR("%s:%d] Error: Cannot allocate memory\n", __func__, __LINE__);
+    return -1;
+  }
+
+  /* init omx handle queue */
+  rc = mm_jpeg_queue_init(p_out_buf_q);
+  if (0 != rc) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+    free(p_out_buf_q);
+    goto error1;
+  }
+
+  for (i = 0; i < num_omx_sessions; i++) {
+    uint32_t buf_idx = 0U;
+    session_idx = mm_jpeg_get_new_session_idx(my_obj, clnt_idx, &p_session);
+    if (session_idx < 0 || NULL == p_session) {
+      CDBG_ERROR("%s:%d] invalid session id (%d)", __func__, __LINE__, session_idx);
+      goto error2;
+    }
+
+    snprintf(trace_tag, sizeof(trace_tag), "Camera:JPEGsession%d", session_idx);
+    ATRACE_INT(trace_tag, 1);
+
+    p_session->job_index = 0;
+
+    p_session->next_session = NULL;
+
+    if (p_prev_session) {
+      p_prev_session->next_session = p_session;
+    }
+    p_prev_session = p_session;
+
+    buf_idx = i;
+    if (buf_idx < MM_JPEG_CONCURRENT_SESSIONS_COUNT) {
+      p_session->work_buffer = my_obj->ionBuffer[buf_idx];
+    } else {
+      CDBG_ERROR("%s %d: Invalid index, setting buffer addr to null", __func__, __LINE__);
+      p_session->work_buffer.addr = NULL;
+      p_session->work_buffer.ion_fd = -1;
+      p_session->work_buffer.p_pmem_fd = -1;
+    }
+
+    p_session->jpeg_obj = (void*)my_obj; /* save a ptr to jpeg_obj */
+
+    ret = mm_jpeg_session_create(p_session);
+    if (OMX_ErrorNone != ret) {
+      p_session->active = OMX_FALSE;
+      CDBG_ERROR("%s:%d] jpeg session create failed", __func__, __LINE__);
+      goto error2;
+    }
+
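+    /* Session id layout: JOB_ID_MAGICVAL in the top byte, the session
+     * index starting at bit 8 and the client index in the low byte;
+     * GET_SESSION_IDX and GET_CLIENT_IDX unpack it again. */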
+    uint32_t session_id = (JOB_ID_MAGICVAL << 24) |
+        ((uint32_t)session_idx << 8) | clnt_idx;
+
+    if (!*p_session_id) {
+      *p_session_id = session_id;
+    }
+
+    /*copy the params*/
+    p_session->params = *p_params;
+    if (p_session->thumb_from_main) {
+      memcpy(p_session->params.src_thumb_buf, p_session->params.src_main_buf,
+        sizeof(p_session->params.src_thumb_buf));
+      p_session->params.num_tmb_bufs =  p_session->params.num_src_bufs;
+      if (!p_session->params.encode_thumbnail) {
+         p_session->params.num_tmb_bufs = 0;
+      }
+      p_session->params.thumb_dim.src_dim = p_session->params.main_dim.src_dim;
+      p_session->params.thumb_dim.crop = p_session->params.main_dim.crop;
+    }
+    p_session->client_hdl = client_hdl;
+    p_session->sessionId = session_id;
+    p_session->session_handle_q = p_session_handle_q;
+    p_session->out_buf_q = p_out_buf_q;
+
+    qdata.p = p_session;
+    mm_jpeg_queue_enq(p_session_handle_q, qdata);
+
+    p_session->meta_enc_key = NULL;
+    p_session->meta_enc_keylen = 0;
+
+#ifdef MM_JPEG_READ_META_KEYFILE
+    mm_jpeg_read_meta_keyfile(p_session, META_KEYFILE);
+#endif
+
+    if (OMX_FALSE == p_session->config) {
+      rc = mm_jpeg_session_configure(p_session);
+      if (rc) {
+        CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+        goto error2;
+      }
+      p_session->config = OMX_TRUE;
+    }
+    p_session->num_omx_sessions = num_omx_sessions;
+
+    CDBG_HIGH("%s:%d] session id %x", __func__, __LINE__, session_id);
+  }
+
+  // Queue the output buf indexes
+  for (i = 0; i < p_params->num_dst_bufs; i++) {
+    qdata.u32 = i + 1;
+    mm_jpeg_queue_enq(p_out_buf_q, qdata);
+  }
+
+  return rc;
+
+error1:
+  rc = -1;
+error2:
+  if (NULL != p_session) {
+    ATRACE_INT(trace_tag, 0);
+  }
+  return rc;
+}
+
+/** mm_jpegenc_destroy_job
+ *
+ *  Arguments:
+ *    @p_session: Session obj
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       Destroy the job-related parameters
+ *
+ **/
+static int32_t mm_jpegenc_destroy_job(mm_jpeg_job_session_t *p_session)
+{
+  mm_jpeg_encode_job_t *p_jobparams = &p_session->encode_job;
+  int i = 0, rc = 0;
+
+  CDBG_HIGH("%s:%d] Exif entry count %d %d", __func__, __LINE__,
+    (int)p_jobparams->exif_info.numOfEntries,
+    (int)p_session->exif_count_local);
+  for (i = 0; i < p_session->exif_count_local; i++) {
+    rc = releaseExifEntry(&p_session->exif_info_local[i]);
+    if (rc) {
+      CDBG_ERROR("%s:%d] Exif release failed (%d)", __func__, __LINE__, rc);
+    }
+  }
+  p_session->exif_count_local = 0;
+
+  return rc;
+}
+
+/** mm_jpegenc_job_done:
+ *
+ *  Arguments:
+ *    @p_session: encode session
+ *
+ *  Return:
+ *       none
+ *
+ *  Description:
+ *       Handle completion of an encode job: release job resources,
+ *       recycle the session and wake up the job manager
+ *
+ **/
+static void mm_jpegenc_job_done(mm_jpeg_job_session_t *p_session)
+{
+  mm_jpeg_q_data_t qdata;
+  mm_jpeg_obj *my_obj = (mm_jpeg_obj *)p_session->jpeg_obj;
+  mm_jpeg_job_q_node_t *node = NULL;
+
+  /*Destroy job related params*/
+  mm_jpegenc_destroy_job(p_session);
+
+  /*remove the job*/
+  node = mm_jpeg_queue_remove_job_by_job_id(&my_obj->ongoing_job_q,
+    p_session->jobId);
+  if (node) {
+    free(node);
+  }
+  p_session->encoding = OMX_FALSE;
+
+  // Queue to available sessions
+  qdata.p = p_session;
+  mm_jpeg_queue_enq(p_session->session_handle_q, qdata);
+
+  if (p_session->auto_out_buf) {
+    //Queue out buf index
+    qdata.u32 = (uint32_t)(p_session->encode_job.dst_index + 1);
+    mm_jpeg_queue_enq(p_session->out_buf_q, qdata);
+  }
+
+  /* wake up jobMgr thread to work on new job if there is any */
+  cam_sem_post(&my_obj->job_mgr.job_sem);
+}
+
+/** mm_jpeg_destroy_session:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *    @p_session: session object
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       Destroy the encoding session
+ *
+ **/
+int32_t mm_jpeg_destroy_session(mm_jpeg_obj *my_obj,
+  mm_jpeg_job_session_t *p_session)
+{
+  mm_jpeg_q_data_t qdata;
+  int32_t rc = 0;
+  mm_jpeg_job_q_node_t *node = NULL;
+  uint32_t session_id = 0;
+  mm_jpeg_job_session_t *p_cur_sess;
+  char trace_tag[32];
+
+  if (NULL == p_session) {
+    CDBG_ERROR("%s:%d] invalid session", __func__, __LINE__);
+    return rc;
+  }
+
+  session_id = p_session->sessionId;
+
+  pthread_mutex_lock(&my_obj->job_lock);
+
+  /* abort job if in todo queue */
+  CDBG_HIGH("%s:%d] abort todo jobs", __func__, __LINE__);
+  node = mm_jpeg_queue_remove_job_by_session_id(&my_obj->job_mgr.job_queue, session_id);
+  while (NULL != node) {
+    free(node);
+    node = mm_jpeg_queue_remove_job_by_session_id(&my_obj->job_mgr.job_queue, session_id);
+  }
+
+  /* abort job if in ongoing queue */
+  CDBG_HIGH("%s:%d] abort ongoing jobs", __func__, __LINE__);
+  node = mm_jpeg_queue_remove_job_by_session_id(&my_obj->ongoing_job_q, session_id);
+  while (NULL != node) {
+    free(node);
+    node = mm_jpeg_queue_remove_job_by_session_id(&my_obj->ongoing_job_q, session_id);
+  }
+
+  /* abort the current session */
+  mm_jpeg_session_abort(p_session);
+  mm_jpeg_session_destroy(p_session);
+
+  p_cur_sess = p_session;
+
+  do {
+    mm_jpeg_remove_session_idx(my_obj, p_cur_sess->sessionId);
+  } while (NULL != (p_cur_sess = p_cur_sess->next_session));
+
+
+  pthread_mutex_unlock(&my_obj->job_lock);
+
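+  /* The session handle queue and output buffer queue are shared by all
+   * linked burst sessions, so they are drained and released only once,
+   * after every session index has been removed. */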
+  while (1) {
+    qdata = mm_jpeg_queue_deq(p_session->session_handle_q);
+    if (NULL == qdata.p)
+      break;
+  }
+  mm_jpeg_queue_deinit(p_session->session_handle_q);
+  free(p_session->session_handle_q);
+  p_session->session_handle_q = NULL;
+
+  while (1) {
+    qdata = mm_jpeg_queue_deq(p_session->out_buf_q);
+    if (0U == qdata.u32)
+      break;
+  }
+  mm_jpeg_queue_deinit(p_session->out_buf_q);
+  free(p_session->out_buf_q);
+  p_session->out_buf_q = NULL;
+
+
+  /* wake up jobMgr thread to work on new job if there is any */
+  cam_sem_post(&my_obj->job_mgr.job_sem);
+
+  snprintf(trace_tag, sizeof(trace_tag), "Camera:JPEGsession%d", GET_SESSION_IDX(session_id));
+  ATRACE_INT(trace_tag, 0);
+
+  CDBG_HIGH("%s:%d] destroy session successful. X", __func__, __LINE__);
+
+  return rc;
+}
+
+
+
+
+/** mm_jpeg_destroy_session_unlocked:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *    @p_session: session object
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       Destroy the encoding session without taking the job lock;
+ *       the caller must already hold it
+ *
+ **/
+int32_t mm_jpeg_destroy_session_unlocked(mm_jpeg_obj *my_obj,
+  mm_jpeg_job_session_t *p_session)
+{
+  int32_t rc = -1;
+  mm_jpeg_job_q_node_t *node = NULL;
+  uint32_t session_id = 0;
+  if (NULL == p_session) {
+    CDBG_ERROR("%s:%d] invalid session", __func__, __LINE__);
+    return rc;
+  }
+
+  session_id = p_session->sessionId;
+
+  /* abort job if in todo queue */
+  CDBG("%s:%d] abort todo jobs", __func__, __LINE__);
+  node = mm_jpeg_queue_remove_job_by_session_id(&my_obj->job_mgr.job_queue, session_id);
+  while (NULL != node) {
+    free(node);
+    node = mm_jpeg_queue_remove_job_by_session_id(&my_obj->job_mgr.job_queue, session_id);
+  }
+
+  /* abort job if in ongoing queue */
+  CDBG("%s:%d] abort ongoing jobs", __func__, __LINE__);
+  node = mm_jpeg_queue_remove_job_by_session_id(&my_obj->ongoing_job_q, session_id);
+  while (NULL != node) {
+    free(node);
+    node = mm_jpeg_queue_remove_job_by_session_id(&my_obj->ongoing_job_q, session_id);
+  }
+
+  /* abort the current session */
+  mm_jpeg_session_abort(p_session);
+  //mm_jpeg_remove_session_idx(my_obj, session_id);
+
+  return rc;
+}
+
+/** mm_jpeg_destroy_session_by_id:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *    @session_id: session index
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       Destroy the encoding session
+ *
+ **/
+int32_t mm_jpeg_destroy_session_by_id(mm_jpeg_obj *my_obj, uint32_t session_id)
+{
+  mm_jpeg_job_session_t *p_session = mm_jpeg_get_session(my_obj, session_id);
+
+  return mm_jpeg_destroy_session(my_obj, p_session);
+}
+
+
+
+/** mm_jpeg_close:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *    @client_hdl: client handle
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       Close the jpeg client
+ *
+ **/
+int32_t mm_jpeg_close(mm_jpeg_obj *my_obj, uint32_t client_hdl)
+{
+  int32_t rc = -1;
+  uint8_t clnt_idx = 0;
+  int i = 0;
+
+  /* check if valid client */
+  clnt_idx = mm_jpeg_util_get_index_by_handler(client_hdl);
+  if (clnt_idx >= MAX_JPEG_CLIENT_NUM) {
+    CDBG_ERROR("%s: invalid client with handler (%d)", __func__, client_hdl);
+    return rc;
+  }
+
+  CDBG("%s:%d] E", __func__, __LINE__);
+
+  /* abort all jobs from the client */
+  pthread_mutex_lock(&my_obj->job_lock);
+
+  CDBG("%s:%d] ", __func__, __LINE__);
+
+  for (i = 0; i < MM_JPEG_MAX_SESSION; i++) {
+    if (OMX_TRUE == my_obj->clnt_mgr[clnt_idx].session[i].active)
+      mm_jpeg_destroy_session_unlocked(my_obj,
+        &my_obj->clnt_mgr[clnt_idx].session[i]);
+  }
+
+  CDBG("%s:%d] ", __func__, __LINE__);
+
+#ifdef LOAD_ADSP_RPC_LIB
+  if (NULL != my_obj->adsprpc_lib_handle) {
+    dlclose(my_obj->adsprpc_lib_handle);
+    my_obj->adsprpc_lib_handle = NULL;
+  }
+#endif
+
+  pthread_mutex_unlock(&my_obj->job_lock);
+  CDBG("%s:%d] ", __func__, __LINE__);
+
+  /* invalidate client session */
+  pthread_mutex_destroy(&my_obj->clnt_mgr[clnt_idx].lock);
+  memset(&my_obj->clnt_mgr[clnt_idx], 0, sizeof(mm_jpeg_client_t));
+
+  rc = 0;
+  CDBG("%s:%d] X", __func__, __LINE__);
+  return rc;
+}
+
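+/** mm_jpeg_ebd:
+ *
+ *  Arguments:
+ *    @hComponent: OMX component handle
+ *    @pAppData: jpeg session
+ *    @pBuffer: OMX buffer header
+ *
+ *  Return:
+ *       OMX error type
+ *
+ *  Description:
+ *       OMX empty-buffer-done callback; counts the input buffers
+ *       returned by the encoder for this session
+ *
+ **/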
+OMX_ERRORTYPE mm_jpeg_ebd(OMX_HANDLETYPE hComponent,
+  OMX_PTR pAppData,
+  OMX_BUFFERHEADERTYPE *pBuffer)
+{
+  mm_jpeg_job_session_t *p_session = (mm_jpeg_job_session_t *) pAppData;
+
+  CDBG_HIGH("%s:%d] count %d ", __func__, __LINE__, p_session->ebd_count);
+  pthread_mutex_lock(&p_session->lock);
+  p_session->ebd_count++;
+  pthread_mutex_unlock(&p_session->lock);
+  return 0;
+}
+
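+/** mm_jpeg_fbd:
+ *
+ *  Arguments:
+ *    @hComponent: OMX component handle
+ *    @pAppData: jpeg session
+ *    @pBuffer: OMX buffer header holding the encoded bitstream
+ *
+ *  Return:
+ *       OMX error type
+ *
+ *  Description:
+ *       OMX fill-buffer-done callback; delivers the encoded JPEG to the
+ *       client through jpeg_cb and recycles the session via
+ *       mm_jpegenc_job_done
+ *
+ **/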
+OMX_ERRORTYPE mm_jpeg_fbd(OMX_HANDLETYPE hComponent,
+  OMX_PTR pAppData,
+  OMX_BUFFERHEADERTYPE *pBuffer)
+{
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  mm_jpeg_job_session_t *p_session = (mm_jpeg_job_session_t *) pAppData;
+  mm_jpeg_output_t output_buf;
+  CDBG_HIGH("%s:%d] count %d ", __func__, __LINE__, p_session->fbd_count);
+  CDBG_HIGH("[KPI Perf] : PROFILE_JPEG_FBD");
+
+  pthread_mutex_lock(&p_session->lock);
+  ATRACE_INT("Camera:JPEG",
+      (int32_t)((uint32_t)GET_SESSION_IDX(
+        p_session->sessionId)<<16 | --p_session->job_index));
+  if (MM_JPEG_ABORT_NONE != p_session->abort_state) {
+    pthread_mutex_unlock(&p_session->lock);
+    return ret;
+  }
+#ifdef MM_JPEG_DUMP_OUT_BS
+  char filename[256];
+  static int bsc;
+  snprintf(filename, sizeof(filename),
+      QCAMERA_DUMP_FRM_LOCATION"jpeg/mm_jpeg_bs%d.jpg", bsc++);
+  DUMP_TO_FILE(filename,
+    pBuffer->pBuffer,
+    (size_t)(uint32_t)pBuffer->nFilledLen);
+#endif
+
+  p_session->fbd_count++;
+  if (NULL != p_session->params.jpeg_cb) {
+
+    p_session->job_status = JPEG_JOB_STATUS_DONE;
+    output_buf.buf_filled_len = (uint32_t)pBuffer->nFilledLen;
+    output_buf.buf_vaddr = pBuffer->pBuffer;
+    output_buf.fd = -1;
+    CDBG_HIGH("%s:%d] send jpeg callback %d buf 0x%p len %u JobID %u",
+      __func__, __LINE__,
+      p_session->job_status, pBuffer->pBuffer,
+      (unsigned int)pBuffer->nFilledLen, p_session->jobId);
+    p_session->params.jpeg_cb(p_session->job_status,
+      p_session->client_hdl,
+      p_session->jobId,
+      &output_buf,
+      p_session->params.userdata);
+
+    mm_jpegenc_job_done(p_session);
+
+  }
+  pthread_mutex_unlock(&p_session->lock);
+  CDBG("%s:%d] ", __func__, __LINE__);
+
+  return ret;
+}
+
+
+
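+/** mm_jpeg_event_handler:
+ *
+ *  Arguments:
+ *    @hComponent: OMX component handle
+ *    @pAppData: jpeg session
+ *    @eEvent: OMX event type
+ *    @nData1: event data 1
+ *    @nData2: event data 2
+ *    @pEventData: event payload
+ *
+ *  Return:
+ *       OMX error type
+ *
+ *  Description:
+ *       OMX event callback; completes pending aborts, reports encode
+ *       errors to the client and signals command-complete state changes
+ *
+ **/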
+OMX_ERRORTYPE mm_jpeg_event_handler(OMX_HANDLETYPE hComponent,
+  OMX_PTR pAppData,
+  OMX_EVENTTYPE eEvent,
+  OMX_U32 nData1,
+  OMX_U32 nData2,
+  OMX_PTR pEventData)
+{
+  mm_jpeg_job_session_t *p_session = (mm_jpeg_job_session_t *) pAppData;
+
+  CDBG("%s:%d] %d %d %d state %d", __func__, __LINE__, eEvent, (int)nData1,
+    (int)nData2, p_session->abort_state);
+
+  pthread_mutex_lock(&p_session->lock);
+
+  if (MM_JPEG_ABORT_INIT == p_session->abort_state) {
+    p_session->abort_state = MM_JPEG_ABORT_DONE;
+    pthread_cond_signal(&p_session->cond);
+    pthread_mutex_unlock(&p_session->lock);
+    return OMX_ErrorNone;
+  }
+
+  if (eEvent == OMX_EventError) {
+    p_session->error_flag = nData2;
+    if (p_session->encoding == OMX_TRUE) {
+      CDBG_ERROR("%s:%d] Error during encoding", __func__, __LINE__);
+
+      /* send jpeg callback */
+      if (NULL != p_session->params.jpeg_cb) {
+        p_session->job_status = JPEG_JOB_STATUS_ERROR;
+        CDBG_ERROR("%s:%d] send jpeg error callback %d", __func__, __LINE__,
+          p_session->job_status);
+        p_session->params.jpeg_cb(p_session->job_status,
+          p_session->client_hdl,
+          p_session->jobId,
+          NULL,
+          p_session->params.userdata);
+      }
+
+      /* remove from ready queue */
+      mm_jpegenc_job_done(p_session);
+    }
+    pthread_cond_signal(&p_session->cond);
+  } else if (eEvent == OMX_EventCmdComplete) {
+    if (p_session->state_change_pending == OMX_TRUE) {
+      p_session->state_change_pending = OMX_FALSE;
+      pthread_cond_signal(&p_session->cond);
+    }
+  }
+
+  pthread_mutex_unlock(&p_session->lock);
+  CDBG("%s:%d]", __func__, __LINE__);
+  return OMX_ErrorNone;
+}
+
+
+
+/* remove the first job from the queue with matching client handle */
+mm_jpeg_job_q_node_t* mm_jpeg_queue_remove_job_by_client_id(
+  mm_jpeg_queue_t* queue, uint32_t client_hdl)
+{
+  mm_jpeg_q_node_t* node = NULL;
+  mm_jpeg_job_q_node_t* data = NULL;
+  mm_jpeg_job_q_node_t* job_node = NULL;
+  struct cam_list *head = NULL;
+  struct cam_list *pos = NULL;
+
+  pthread_mutex_lock(&queue->lock);
+  head = &queue->head.list;
+  pos = head->next;
+  while(pos != head) {
+    node = member_of(pos, mm_jpeg_q_node_t, list);
+    data = (mm_jpeg_job_q_node_t *)node->data.p;
+
+    if (data && (data->enc_info.client_handle == client_hdl)) {
+      CDBG_HIGH("%s:%d] found matching client handle", __func__, __LINE__);
+      job_node = data;
+      cam_list_del_node(&node->list);
+      queue->size--;
+      free(node);
+      CDBG_HIGH("%s: queue size = %d", __func__, queue->size);
+      break;
+    }
+    pos = pos->next;
+  }
+
+  pthread_mutex_unlock(&queue->lock);
+
+  return job_node;
+}
+
+/* remove the first job from the queue with matching session id */
+mm_jpeg_job_q_node_t* mm_jpeg_queue_remove_job_by_session_id(
+  mm_jpeg_queue_t* queue, uint32_t session_id)
+{
+  mm_jpeg_q_node_t* node = NULL;
+  mm_jpeg_job_q_node_t* data = NULL;
+  mm_jpeg_job_q_node_t* job_node = NULL;
+  struct cam_list *head = NULL;
+  struct cam_list *pos = NULL;
+
+  pthread_mutex_lock(&queue->lock);
+  head = &queue->head.list;
+  pos = head->next;
+  while(pos != head) {
+    node = member_of(pos, mm_jpeg_q_node_t, list);
+    data = (mm_jpeg_job_q_node_t *)node->data.p;
+
+    if (data && (data->enc_info.encode_job.session_id == session_id)) {
+      CDBG_HIGH("%s:%d] found matching session id", __func__, __LINE__);
+      job_node = data;
+      cam_list_del_node(&node->list);
+      queue->size--;
+      free(node);
+      CDBG_HIGH("%s: queue size = %d", __func__, queue->size);
+      break;
+    }
+    pos = pos->next;
+  }
+
+  pthread_mutex_unlock(&queue->lock);
+
+  return job_node;
+}
+
+/* remove job from the queue with matching job id */
+mm_jpeg_job_q_node_t* mm_jpeg_queue_remove_job_by_job_id(
+  mm_jpeg_queue_t* queue, uint32_t job_id)
+{
+  mm_jpeg_q_node_t* node = NULL;
+  mm_jpeg_job_q_node_t* data = NULL;
+  mm_jpeg_job_q_node_t* job_node = NULL;
+  struct cam_list *head = NULL;
+  struct cam_list *pos = NULL;
+  uint32_t lq_job_id;
+
+  pthread_mutex_lock(&queue->lock);
+  head = &queue->head.list;
+  pos = head->next;
+  while(pos != head) {
+    node = member_of(pos, mm_jpeg_q_node_t, list);
+    data = (mm_jpeg_job_q_node_t *)node->data.p;
+
+    if(NULL == data) {
+      CDBG_ERROR("%s:%d] Data is NULL", __func__, __LINE__);
+      pthread_mutex_unlock(&queue->lock);
+      return NULL;
+    }
+
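+    /* Decode jobs keep their id in dec_info, encode jobs in enc_info,
+     * so pick the matching field before comparing. */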
+    if (data->type == MM_JPEG_CMD_TYPE_DECODE_JOB) {
+      lq_job_id = data->dec_info.job_id;
+    } else {
+      lq_job_id = data->enc_info.job_id;
+    }
+
+    if (data && (lq_job_id == job_id)) {
+      CDBG_HIGH("%s:%d] found matching job id", __func__, __LINE__);
+      job_node = data;
+      cam_list_del_node(&node->list);
+      queue->size--;
+      free(node);
+      break;
+    }
+    pos = pos->next;
+  }
+
+  pthread_mutex_unlock(&queue->lock);
+
+  return job_node;
+}
+
+/* remove job from the queue with matching job id */
+mm_jpeg_job_q_node_t* mm_jpeg_queue_remove_job_unlk(
+  mm_jpeg_queue_t* queue, uint32_t job_id)
+{
+  mm_jpeg_q_node_t* node = NULL;
+  mm_jpeg_job_q_node_t* data = NULL;
+  mm_jpeg_job_q_node_t* job_node = NULL;
+  struct cam_list *head = NULL;
+  struct cam_list *pos = NULL;
+
+  head = &queue->head.list;
+  pos = head->next;
+  while(pos != head) {
+    node = member_of(pos, mm_jpeg_q_node_t, list);
+    data = (mm_jpeg_job_q_node_t *)node->data.p;
+
+    if (data && (data->enc_info.job_id == job_id)) {
+      job_node = data;
+      cam_list_del_node(&node->list);
+      queue->size--;
+      free(node);
+      break;
+    }
+    pos = pos->next;
+  }
+
+  return job_node;
+}
diff --git a/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpeg_exif.c b/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpeg_exif.c
new file mode 100644
index 0000000..043a9de
--- /dev/null
+++ b/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpeg_exif.c
@@ -0,0 +1,643 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <pthread.h>
+#include "mm_jpeg_dbg.h"
+#include "mm_jpeg.h"
+#include <errno.h>
+#include <math.h>
+
+
+#define LOWER(a)               ((a) & 0xFFFF)
+#define UPPER(a)               (((a)>>16) & 0xFFFF)
+#define CHANGE_ENDIAN_16(a)  ((0x00FF & ((a)>>8)) | (0xFF00 & ((a)<<8)))
+#define ROUND(a) \
+        (((a) >= 0) ? (uint32_t)((a) + 0.5) : (uint32_t)((a) - 0.5))
+
+
+/** addExifEntry:
+ *
+ *  Arguments:
+ *   @p_exif_info : Exif info struct
+ *   @tagid   : exif tag ID
+ *   @type    : data type
+ *   @count   : number of data elements of the given type
+ *   @data    : input data ptr
+ *
+ *  Return     : int32_t type of status
+ *               0  -- success
+ *              non-zero failure code
+ *
+ *  Description:
+ *       Function to add an entry to exif data
+ *
+ **/
+int32_t addExifEntry(QOMX_EXIF_INFO *p_exif_info, exif_tag_id_t tagid,
+  exif_tag_type_t type, uint32_t count, void *data)
+{
+    int32_t rc = 0;
+    uint32_t numOfEntries = (uint32_t)p_exif_info->numOfEntries;
+    QEXIF_INFO_DATA *p_info_data = p_exif_info->exif_data;
+    if(numOfEntries >= MAX_EXIF_TABLE_ENTRIES) {
+        ALOGE("%s: Number of entries exceeded limit", __func__);
+        return -1;
+    }
+
+    p_info_data[numOfEntries].tag_id = tagid;
+    p_info_data[numOfEntries].tag_entry.type = type;
+    p_info_data[numOfEntries].tag_entry.count = count;
+    p_info_data[numOfEntries].tag_entry.copy = 1;
+    switch (type) {
+    case EXIF_BYTE: {
+      if (count > 1) {
+        uint8_t *values = (uint8_t *)malloc(count);
+        if (values == NULL) {
+          ALOGE("%s: No memory for byte array", __func__);
+          rc = -1;
+        } else {
+          memcpy(values, data, count);
+          p_info_data[numOfEntries].tag_entry.data._bytes = values;
+        }
+      } else {
+        p_info_data[numOfEntries].tag_entry.data._byte = *(uint8_t *)data;
+      }
+    }
+    break;
+    case EXIF_ASCII: {
+      char *str = NULL;
+      str = (char *)malloc(count + 1);
+      if (str == NULL) {
+        ALOGE("%s: No memory for ascii string", __func__);
+        rc = -1;
+      } else {
+        memset(str, 0, count + 1);
+        memcpy(str, data, count);
+        p_info_data[numOfEntries].tag_entry.data._ascii = str;
+      }
+    }
+    break;
+    case EXIF_SHORT: {
+      if (count > 1) {
+        uint16_t *values = (uint16_t *)malloc(count * sizeof(uint16_t));
+        if (values == NULL) {
+          ALOGE("%s: No memory for short array", __func__);
+          rc = -1;
+        } else {
+          memcpy(values, data, count * sizeof(uint16_t));
+          p_info_data[numOfEntries].tag_entry.data._shorts = values;
+        }
+      } else {
+        p_info_data[numOfEntries].tag_entry.data._short = *(uint16_t *)data;
+      }
+    }
+    break;
+    case EXIF_LONG: {
+      if (count > 1) {
+        uint32_t *values = (uint32_t *)malloc(count * sizeof(uint32_t));
+        if (values == NULL) {
+          ALOGE("%s: No memory for long array", __func__);
+          rc = -1;
+        } else {
+          memcpy(values, data, count * sizeof(uint32_t));
+          p_info_data[numOfEntries].tag_entry.data._longs = values;
+        }
+      } else {
+        p_info_data[numOfEntries].tag_entry.data._long = *(uint32_t *)data;
+      }
+    }
+    break;
+    case EXIF_RATIONAL: {
+      if (count > 1) {
+        rat_t *values = (rat_t *)malloc(count * sizeof(rat_t));
+        if (values == NULL) {
+          ALOGE("%s: No memory for rational array", __func__);
+          rc = -1;
+        } else {
+          memcpy(values, data, count * sizeof(rat_t));
+          p_info_data[numOfEntries].tag_entry.data._rats = values;
+        }
+      } else {
+        p_info_data[numOfEntries].tag_entry.data._rat = *(rat_t *)data;
+      }
+    }
+    break;
+    case EXIF_UNDEFINED: {
+      uint8_t *values = (uint8_t *)malloc(count);
+      if (values == NULL) {
+        ALOGE("%s: No memory for undefined array", __func__);
+        rc = -1;
+      } else {
+        memcpy(values, data, count);
+        p_info_data[numOfEntries].tag_entry.data._undefined = values;
+      }
+    }
+    break;
+    case EXIF_SLONG: {
+      if (count > 1) {
+        int32_t *values = (int32_t *)malloc(count * sizeof(int32_t));
+        if (values == NULL) {
+          ALOGE("%s: No memory for signed long array", __func__);
+          rc = -1;
+        } else {
+          memcpy(values, data, count * sizeof(int32_t));
+          p_info_data[numOfEntries].tag_entry.data._slongs = values;
+        }
+      } else {
+        p_info_data[numOfEntries].tag_entry.data._slong = *(int32_t *)data;
+      }
+    }
+    break;
+    case EXIF_SRATIONAL: {
+      if (count > 1) {
+        srat_t *values = (srat_t *)malloc(count * sizeof(srat_t));
+        if (values == NULL) {
+          ALOGE("%s: No memory for signed rational array", __func__);
+          rc = -1;
+        } else {
+          memcpy(values, data, count * sizeof(srat_t));
+          p_info_data[numOfEntries].tag_entry.data._srats = values;
+        }
+      } else {
+        p_info_data[numOfEntries].tag_entry.data._srat = *(srat_t *)data;
+      }
+    }
+    break;
+    }
+
+    // Increase number of entries
+    p_exif_info->numOfEntries++;
+    return rc;
+}
+
+/** releaseExifEntry
+ *
+ *  Arguments:
+ *   @p_exif_data : Exif info struct
+ *
+ *  Return     : int32_t type of status
+ *               0  -- success
+ *              non-zero failure code
+ *
+ *  Description:
+ *       Function to release an entry from exif data
+ *
+ **/
+int32_t releaseExifEntry(QEXIF_INFO_DATA *p_exif_data)
+{
+ switch (p_exif_data->tag_entry.type) {
+  case EXIF_BYTE: {
+    if (p_exif_data->tag_entry.count > 1 &&
+      p_exif_data->tag_entry.data._bytes != NULL) {
+      free(p_exif_data->tag_entry.data._bytes);
+      p_exif_data->tag_entry.data._bytes = NULL;
+    }
+  }
+  break;
+  case EXIF_ASCII: {
+    if (p_exif_data->tag_entry.data._ascii != NULL) {
+      free(p_exif_data->tag_entry.data._ascii);
+      p_exif_data->tag_entry.data._ascii = NULL;
+    }
+  }
+  break;
+  case EXIF_SHORT: {
+    if (p_exif_data->tag_entry.count > 1 &&
+      p_exif_data->tag_entry.data._shorts != NULL) {
+      free(p_exif_data->tag_entry.data._shorts);
+      p_exif_data->tag_entry.data._shorts = NULL;
+    }
+  }
+  break;
+  case EXIF_LONG: {
+    if (p_exif_data->tag_entry.count > 1 &&
+      p_exif_data->tag_entry.data._longs != NULL) {
+      free(p_exif_data->tag_entry.data._longs);
+      p_exif_data->tag_entry.data._longs = NULL;
+    }
+  }
+  break;
+  case EXIF_RATIONAL: {
+    if (p_exif_data->tag_entry.count > 1 &&
+      p_exif_data->tag_entry.data._rats != NULL) {
+      free(p_exif_data->tag_entry.data._rats);
+      p_exif_data->tag_entry.data._rats = NULL;
+    }
+  }
+  break;
+  case EXIF_UNDEFINED: {
+    if (p_exif_data->tag_entry.data._undefined != NULL) {
+      free(p_exif_data->tag_entry.data._undefined);
+      p_exif_data->tag_entry.data._undefined = NULL;
+    }
+  }
+  break;
+  case EXIF_SLONG: {
+    if (p_exif_data->tag_entry.count > 1 &&
+      p_exif_data->tag_entry.data._slongs != NULL) {
+      free(p_exif_data->tag_entry.data._slongs);
+      p_exif_data->tag_entry.data._slongs = NULL;
+    }
+  }
+  break;
+  case EXIF_SRATIONAL: {
+    if (p_exif_data->tag_entry.count > 1 &&
+      p_exif_data->tag_entry.data._srats != NULL) {
+      free(p_exif_data->tag_entry.data._srats);
+      p_exif_data->tag_entry.data._srats = NULL;
+    }
+  }
+  break;
+  } /*end of switch*/
+
+  return 0;
+}
+
+/** process_sensor_data:
+ *
+ *  Arguments:
+ *   @p_sensor_params : ptr to sensor data
+ *
+ *  Return     : int32_t type of status
+ *               NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ *  Description:
+ *       process sensor data
+ *
+ *  Notes: this needs to be filled for the metadata
+ **/
+int process_sensor_data(cam_sensor_params_t *p_sensor_params,
+  QOMX_EXIF_INFO *exif_info)
+{
+  int rc = 0;
+  rat_t val_rat;
+
+  if (NULL == p_sensor_params) {
+    ALOGE("%s %d: Sensor params are null", __func__, __LINE__);
+    return 0;
+  }
+
+  CDBG("%s:%d] From metadata aperture = %f ", __func__, __LINE__,
+    p_sensor_params->aperture_value );
+
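+  /* EXIF ApertureValue is expressed in APEX units: Av = 2 * log2(F-number),
+   * so f/2.0 maps to Av = 2.0 and f/2.8 to roughly 3.0. */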
+  if (p_sensor_params->aperture_value >= 1.0) {
+    double apex_value;
+    apex_value = (double)2.0 * log(p_sensor_params->aperture_value) / log(2.0);
+    val_rat.num = (uint32_t)(apex_value * 100);
+    val_rat.denom = 100;
+    rc = addExifEntry(exif_info, EXIFTAGID_APERTURE, EXIF_RATIONAL, 1, &val_rat);
+    if (rc) {
+      ALOGE("%s:%d]: Error adding Exif Entry", __func__, __LINE__);
+    }
+
+    val_rat.num = (uint32_t)(p_sensor_params->aperture_value * 100);
+    val_rat.denom = 100;
+    rc = addExifEntry(exif_info, EXIFTAGID_F_NUMBER, EXIF_RATIONAL, 1, &val_rat);
+    if (rc) {
+      ALOGE("%s:%d]: Error adding Exif Entry", __func__, __LINE__);
+    }
+  }
+
+  /*Flash*/
+  short val_short;
+  int flash_mode_exif, flash_fired;
+  if (p_sensor_params->flash_state == CAM_FLASH_STATE_FIRED) {
+    flash_fired = 1;
+  } else {
+    flash_fired = 0;
+  }
+  CDBG("%s: Flash fired %d flash mode %d flash state %d", __func__, flash_fired,
+    p_sensor_params->flash_mode, p_sensor_params->flash_state);
+
+  switch(p_sensor_params->flash_mode) {
+  case  CAM_FLASH_MODE_OFF:
+    flash_mode_exif = MM_JPEG_EXIF_FLASH_MODE_OFF;
+    break;
+  case CAM_FLASH_MODE_ON:
+    flash_mode_exif = MM_JPEG_EXIF_FLASH_MODE_ON;
+    break;
+  case CAM_FLASH_MODE_AUTO:
+    flash_mode_exif = MM_JPEG_EXIF_FLASH_MODE_AUTO;
+    break;
+  default:
+    flash_mode_exif = MM_JPEG_EXIF_FLASH_MODE_AUTO;
+    ALOGE("%s:%d]: Unsupported flash mode", __func__, __LINE__);
+  }
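+  /* EXIF Flash tag layout: bit 0 records whether the flash fired and
+   * bits 3..4 carry the flash firing mode. */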
+  val_short = (short)(flash_fired | (flash_mode_exif << 3));
+
+  rc = addExifEntry(exif_info, EXIFTAGID_FLASH, EXIF_SHORT, 1, &val_short);
+  if (rc) {
+    ALOGE("%s %d]: Error adding flash exif entry", __func__, __LINE__);
+  }
+  /* Sensing Method */
+  val_short = (short) p_sensor_params->sensing_method;
+  rc = addExifEntry(exif_info, EXIFTAGID_SENSING_METHOD, EXIF_SHORT,
+    sizeof(val_short)/2, &val_short);
+  if (rc) {
+    ALOGE("%s:%d]: Error adding sensing method Exif Entry", __func__, __LINE__);
+  }
+
+  /* Focal Length in 35 MM Film */
+  val_short = (short)
+    ((p_sensor_params->focal_length * p_sensor_params->crop_factor) + 0.5f);
+  rc = addExifEntry(exif_info, EXIFTAGID_FOCAL_LENGTH_35MM, EXIF_SHORT,
+    1, &val_short);
+  if (rc) {
+    ALOGE("%s:%d]: Error adding Exif Entry", __func__, __LINE__);
+  }
+
+  /* F Number */
+  val_rat.num = (uint32_t)(p_sensor_params->f_number * 100);
+  val_rat.denom = 100;
+  rc = addExifEntry(exif_info, EXIFTAGTYPE_F_NUMBER, EXIF_RATIONAL, 1, &val_rat);
+  if (rc) {
+    ALOGE("%s:%d]: Error adding Exif Entry", __func__, __LINE__);
+  }
+  return rc;
+}
+
+
+/** process_3a_data:
+ *
+ *  Arguments:
+ *   @p_3a_params : ptr to 3a data
+ *
+ *  Return     : int32_t type of status
+ *               NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ *  Description:
+ *       process 3a data
+ *
+ *  Notes: this needs to be filled for the metadata
+ **/
+int process_3a_data(cam_3a_params_t *p_3a_params, QOMX_EXIF_INFO *exif_info)
+{
+  int rc = 0;
+  srat_t val_srat;
+  rat_t val_rat;
+  double shutter_speed_value;
+
+  if (NULL == p_3a_params) {
+    ALOGE("%s %d: 3A params are null", __func__, __LINE__);
+    return 0;
+  }
+
+  CDBG("%s:%d] exp_time %f, iso_value %d, wb_mode %d", __func__, __LINE__,
+    p_3a_params->exp_time, p_3a_params->iso_value, p_3a_params->wb_mode);
+
+  /*Exposure time*/
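+  /* ExposureTime is stored as the rational 1/round(1/t) seconds;
+   * 0/0 marks an exposure time that was not reported. */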
+  if (0.0f >= p_3a_params->exp_time) {
+      val_rat.num = 0;
+      val_rat.denom = 0;
+  } else {
+      val_rat.num = 1;
+      val_rat.denom = ROUND(1.0/p_3a_params->exp_time);
+  }
+  CDBG("%s: numer %d denom %d %zd", __func__, val_rat.num, val_rat.denom,
+      sizeof(val_rat) / (8));
+
+  rc = addExifEntry(exif_info, EXIFTAGID_EXPOSURE_TIME, EXIF_RATIONAL,
+    (sizeof(val_rat)/(8)), &val_rat);
+  if (rc) {
+    ALOGE("%s:%d]: Error adding Exif Entry Exposure time",
+      __func__, __LINE__);
+  }
+
+  /* Shutter Speed*/
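+  /* APEX ShutterSpeedValue: Tv = log2(1 / exposure time); a 1/125 s
+   * exposure gives a Tv of about 6.97, stored here as a signed rational
+   * scaled by 1000. */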
+  if (p_3a_params->exp_time > 0) {
+    shutter_speed_value = log10(1/p_3a_params->exp_time)/log10(2);
+    val_srat.num = (int32_t)(shutter_speed_value * 1000);
+    val_srat.denom = 1000;
+  } else {
+    val_srat.num = 0;
+    val_srat.denom = 0;
+  }
+  rc = addExifEntry(exif_info, EXIFTAGID_SHUTTER_SPEED, EXIF_SRATIONAL,
+    (sizeof(val_srat)/(8)), &val_srat);
+  if (rc) {
+    ALOGE("%s:%d]: Error adding Exif Entry", __func__, __LINE__);
+  }
+
+  /*ISO*/
+  short val_short;
+  val_short = (short)p_3a_params->iso_value;
+  rc = addExifEntry(exif_info, EXIFTAGID_ISO_SPEED_RATING, EXIF_SHORT,
+    sizeof(val_short)/2, &val_short);
+  if (rc) {
+    ALOGE("%s:%d]: Error adding Exif Entry", __func__, __LINE__);
+  }
+
+  /*WB mode*/
+  if (p_3a_params->wb_mode == CAM_WB_MODE_AUTO)
+    val_short = 0;
+  else
+    val_short = 1;
+  rc = addExifEntry(exif_info, EXIFTAGID_WHITE_BALANCE, EXIF_SHORT,
+    sizeof(val_short)/2, &val_short);
+  if (rc) {
+    ALOGE("%s:%d]: Error adding Exif Entry", __func__, __LINE__);
+  }
+
+  /* Metering Mode */
+  val_short = (short) p_3a_params->metering_mode;
+  rc = addExifEntry(exif_info, EXIFTAGID_METERING_MODE, EXIF_SHORT,
+    sizeof(val_short)/2, &val_short);
+  if (rc) {
+    ALOGE("%s:%d]: Error adding Exif Entry", __func__, __LINE__);
+  }
+
+  /* Exposure Program */
+  val_short = (short) p_3a_params->exposure_program;
+  rc = addExifEntry(exif_info, EXIFTAGID_EXPOSURE_PROGRAM, EXIF_SHORT,
+    sizeof(val_short)/2, &val_short);
+  if (rc) {
+    ALOGE("%s:%d]: Error adding Exif Entry", __func__, __LINE__);
+  }
+
+  /* Exposure Mode */
+  val_short = (short) p_3a_params->exposure_mode;
+  rc = addExifEntry(exif_info, EXIFTAGID_EXPOSURE_MODE, EXIF_SHORT,
+    sizeof(val_short)/2, &val_short);
+  if (rc) {
+    ALOGE("%s:%d]: Error adding Exif Entry", __func__, __LINE__);
+  }
+
+  /* Scene Type */
+  uint8_t val_undef;
+  val_undef = (uint8_t) p_3a_params->scenetype;
+  rc = addExifEntry(exif_info, EXIFTAGID_SCENE_TYPE, EXIF_UNDEFINED,
+    sizeof(val_undef), &val_undef);
+  if (rc) {
+    ALOGE("%s:%d]: Error adding Exif Entry", __func__, __LINE__);
+  }
+
+  CDBG("%s:%d] brightness %f", __func__, __LINE__,
+    p_3a_params->brightness);
+
+  /* Brightness Value */
+  val_srat.num = (int32_t) (p_3a_params->brightness * 100.0f);
+  val_srat.denom = 100;
+  rc = addExifEntry(exif_info, EXIFTAGID_BRIGHTNESS, EXIF_SRATIONAL,
+    (sizeof(val_srat)/(8)), &val_srat);
+  if (rc) {
+    ALOGE("%s:%d]: Error adding Exif Entry", __func__, __LINE__);
+  }
+
+  return rc;
+}
+
+/** process_meta_data
+ *
+ *  Arguments:
+ *   @p_meta : ptr to metadata
+ *   @exif_info: Exif info struct
+ *   @mm_jpeg_exif_params: exif params
+ *
+ *  Return     : int32_t type of status
+ *               NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ *  Description:
+ *       Extract exif data from the metadata
+ **/
+int process_meta_data(metadata_buffer_t *p_meta, QOMX_EXIF_INFO *exif_info,
+  mm_jpeg_exif_params_t *p_cam_exif_params, cam_hal_version_t hal_version)
+{
+  int rc = 0;
+  cam_sensor_params_t p_sensor_params;
+  cam_3a_params_t p_3a_params;
+
+  memset(&p_3a_params,  0,  sizeof(cam_3a_params_t));
+  memset(&p_sensor_params, 0, sizeof(cam_sensor_params_t));
+
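+  /* HAL1 publishes fully packed 3A and sensor structs in the metadata
+   * (with the exif params as a fallback), while HAL3 exposes individual
+   * tags that are gathered into the same local structs below. */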
+  if (hal_version == CAM_HAL_V1) {
+    IF_META_AVAILABLE(cam_3a_params_t, l_3a_params, CAM_INTF_META_AEC_INFO,
+        p_meta) {
+      p_3a_params = *l_3a_params;
+    } else if (p_cam_exif_params) {
+      p_3a_params = p_cam_exif_params->cam_3a_params;
+    } else {
+      p_3a_params.exp_time = 0.0;
+      p_3a_params.iso_value = 0;
+      p_3a_params.metering_mode = CAM_METERING_MODE_UNKNOWN;
+      p_3a_params.exposure_program = 0;
+      p_3a_params.exposure_mode = 255;
+      p_3a_params.scenetype = 1;
+      p_3a_params.brightness = 0.0;
+    }
+
+    IF_META_AVAILABLE(int32_t, wb_mode, CAM_INTF_PARM_WHITE_BALANCE, p_meta) {
+      p_3a_params.wb_mode = *wb_mode;
+    }
+
+    IF_META_AVAILABLE(cam_sensor_params_t, l_sensor_params,
+        CAM_INTF_META_SENSOR_INFO, p_meta) {
+      p_sensor_params = *l_sensor_params;
+    } else if (p_cam_exif_params) {
+      p_sensor_params = p_cam_exif_params->sensor_params;
+    } else {
+      p_sensor_params.focal_length = 0;
+      p_sensor_params.f_number = 0;
+      p_sensor_params.sensing_method = 2;
+      p_sensor_params.crop_factor = 0;
+    }
+  } else {
+
+    /* Process 3a data */
+    IF_META_AVAILABLE(int32_t, iso, CAM_INTF_META_SENSOR_SENSITIVITY, p_meta) {
+      p_3a_params.iso_value= *iso;
+    } else {
+      ALOGE("%s: Cannot extract Iso value", __func__);
+    }
+
+    IF_META_AVAILABLE(int64_t, sensor_exposure_time,
+        CAM_INTF_META_SENSOR_EXPOSURE_TIME, p_meta) {
+      p_3a_params.exp_time =
+        (float)((double)(*sensor_exposure_time) / 1000000000.0);
+    } else {
+      ALOGE("%s: Cannot extract Exp time value", __func__);
+    }
+
+    IF_META_AVAILABLE(int32_t, wb_mode, CAM_INTF_PARM_WHITE_BALANCE, p_meta) {
+      p_3a_params.wb_mode = *wb_mode;
+    } else {
+      ALOGE("%s: Cannot extract white balance mode", __func__);
+    }
+
+    /* Process sensor data */
+    IF_META_AVAILABLE(float, aperture, CAM_INTF_META_LENS_APERTURE, p_meta) {
+      p_sensor_params.aperture_value = *aperture;
+    } else {
+      ALOGE("%s: Cannot extract Aperture value", __func__);
+    }
+
+    IF_META_AVAILABLE(uint32_t, flash_mode, CAM_INTF_META_FLASH_MODE, p_meta) {
+      p_sensor_params.flash_mode = *flash_mode;
+    } else {
+      ALOGE("%s: Cannot extract flash mode value", __func__);
+    }
+
+    IF_META_AVAILABLE(int32_t, flash_state, CAM_INTF_META_FLASH_STATE, p_meta) {
+      p_sensor_params.flash_state = (cam_flash_state_t) *flash_state;
+    } else {
+      ALOGE("%s: Cannot extract flash state value", __func__);
+    }
+  }
+  if ((hal_version != CAM_HAL_V1) || (p_sensor_params.sens_type != CAM_SENSOR_YUV)) {
+    rc = process_3a_data(&p_3a_params, exif_info);
+    if (rc) {
+      ALOGE("%s %d: Failed to add 3a exif params", __func__, __LINE__);
+    }
+  }
+
+  rc = process_sensor_data(&p_sensor_params, exif_info);
+  if (rc) {
+    ALOGE("%s %d: Failed to extract sensor params", __func__, __LINE__);
+  }
+
+  if (p_meta) {
+    short val_short = 0;
+
+    IF_META_AVAILABLE(cam_auto_scene_t, scene_cap_type,
+        CAM_INTF_META_ASD_SCENE_CAPTURE_TYPE, p_meta) {
+      val_short = (short) *scene_cap_type;
+    }
+
+    rc = addExifEntry(exif_info, EXIFTAGID_SCENE_CAPTURE_TYPE, EXIF_SHORT,
+      sizeof(val_short)/2, &val_short);
+    if (rc) {
+      ALOGE("%s:%d]: Error adding ASD Exif Entry", __func__, __LINE__);
+    }
+  } else {
+    ALOGE("%s:%d]: Error adding ASD Exif Entry, no meta", __func__, __LINE__);
+  }
+  return rc;
+}
diff --git a/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpeg_interface.c b/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpeg_interface.c
new file mode 100644
index 0000000..29128b9
--- /dev/null
+++ b/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpeg_interface.c
@@ -0,0 +1,373 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <stdlib.h>
+#include <pthread.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <cutils/properties.h>
+#include <stdlib.h>
+
+#include "mm_jpeg_dbg.h"
+#include "mm_jpeg_interface.h"
+#include "mm_jpeg.h"
+
+static pthread_mutex_t g_intf_lock = PTHREAD_MUTEX_INITIALIZER;
+static mm_jpeg_obj* g_jpeg_obj = NULL;
+
+static pthread_mutex_t g_handler_lock = PTHREAD_MUTEX_INITIALIZER;
+static uint16_t g_handler_history_count = 0; /* history count for handler */
+volatile uint32_t gMmJpegIntfLogLevel = 1;
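+/* The interface keeps a single process-wide jpeg object: every entry point
+ * below takes g_intf_lock before touching g_jpeg_obj, while g_handler_lock
+ * only serializes client handle generation. */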
+
+/** mm_jpeg_util_generate_handler:
+ *
+ *  Arguments:
+ *    @index: client index
+ *
+ *  Return:
+ *       handle value
+ *
+ *  Description:
+ *       utility function to generate handler
+ *
+ **/
+uint32_t mm_jpeg_util_generate_handler(uint8_t index)
+{
+  uint32_t handler = 0;
+  pthread_mutex_lock(&g_handler_lock);
+  g_handler_history_count++;
+  if (0 == g_handler_history_count) {
+    g_handler_history_count++;
+  }
+  handler = g_handler_history_count;
+  handler = (handler<<8) | index;
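+  /* Handle layout: a rolling 16-bit count in bits 8 and up, client index
+   * in the low byte; the count skips zero so a valid handle is never 0. */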
+  pthread_mutex_unlock(&g_handler_lock);
+  return handler;
+}
+
+/** mm_jpeg_util_get_index_by_handler:
+ *
+ *  Arguments:
+ *    @handler: handle value
+ *
+ *  Return:
+ *       client index
+ *
+ *  Description:
+ *       get client index
+ *
+ **/
+uint8_t mm_jpeg_util_get_index_by_handler(uint32_t handler)
+{
+  return (handler & 0x000000ff);
+}
+
+/** mm_jpeg_intf_start_job:
+ *
+ *  Arguments:
+ *    @job: jpeg job object
+ *    @job_id: job id
+ *
+ *  Return:
+ *       0 success, failure otherwise
+ *
+ *  Description:
+ *       start the jpeg job
+ *
+ **/
+static int32_t mm_jpeg_intf_start_job(mm_jpeg_job_t* job, uint32_t* job_id)
+{
+  int32_t rc = -1;
+
+  if (NULL == job ||
+    NULL == job_id) {
+    CDBG_ERROR("%s:%d] invalid parameters for job or jobId", __func__, __LINE__);
+    return rc;
+  }
+
+  pthread_mutex_lock(&g_intf_lock);
+  if (NULL == g_jpeg_obj) {
+    /* mm_jpeg obj not exists, return error */
+    CDBG_ERROR("%s:%d] mm_jpeg is not opened yet", __func__, __LINE__);
+    pthread_mutex_unlock(&g_intf_lock);
+    return rc;
+  }
+  rc = mm_jpeg_start_job(g_jpeg_obj, job, job_id);
+  pthread_mutex_unlock(&g_intf_lock);
+  return rc;
+}
+
+/** mm_jpeg_intf_create_session:
+ *
+ *  Arguments:
+ *    @client_hdl: client handle
+ *    @p_params: encode parameters
+ *    @p_session_id: session id
+ *
+ *  Return:
+ *       0 success, failure otherwise
+ *
+ *  Description:
+ *       Create new jpeg session
+ *
+ **/
+static int32_t mm_jpeg_intf_create_session(uint32_t client_hdl,
+    mm_jpeg_encode_params_t *p_params,
+    uint32_t *p_session_id)
+{
+  int32_t rc = -1;
+
+  if (0 == client_hdl || NULL == p_params || NULL == p_session_id) {
+    CDBG_ERROR("%s:%d] invalid client_hdl or jobId", __func__, __LINE__);
+    return rc;
+  }
+
+  pthread_mutex_lock(&g_intf_lock);
+  if (NULL == g_jpeg_obj) {
+    /* mm_jpeg obj not exists, return error */
+    CDBG_ERROR("%s:%d] mm_jpeg is not opened yet", __func__, __LINE__);
+    pthread_mutex_unlock(&g_intf_lock);
+    return rc;
+  }
+
+  rc = mm_jpeg_create_session(g_jpeg_obj, client_hdl, p_params, p_session_id);
+  pthread_mutex_unlock(&g_intf_lock);
+  return rc;
+}
+
+/** mm_jpeg_intf_destroy_session:
+ *
+ *  Arguments:
+ *    @session_id: session id
+ *
+ *  Return:
+ *       0 success, failure otherwise
+ *
+ *  Description:
+ *       Destroy jpeg session
+ *
+ **/
+static int32_t mm_jpeg_intf_destroy_session(uint32_t session_id)
+{
+  int32_t rc = -1;
+
+  if (0 == session_id) {
+    CDBG_ERROR("%s:%d] invalid client_hdl or jobId", __func__, __LINE__);
+    return rc;
+  }
+
+  pthread_mutex_lock(&g_intf_lock);
+  if (NULL == g_jpeg_obj) {
+    /* mm_jpeg obj not exists, return error */
+    CDBG_ERROR("%s:%d] mm_jpeg is not opened yet", __func__, __LINE__);
+    pthread_mutex_unlock(&g_intf_lock);
+    return rc;
+  }
+
+  rc = mm_jpeg_destroy_session_by_id(g_jpeg_obj, session_id);
+  pthread_mutex_unlock(&g_intf_lock);
+  return rc;
+}
+
+/** mm_jpeg_intf_abort_job:
+ *
+ *  Arguments:
+ *    @jobId: job id
+ *
+ *  Return:
+ *       0 success, failure otherwise
+ *
+ *  Description:
+ *       Abort the jpeg job
+ *
+ **/
+static int32_t mm_jpeg_intf_abort_job(uint32_t job_id)
+{
+  int32_t rc = -1;
+
+  if (0 == job_id) {
+    CDBG_ERROR("%s:%d] invalid jobId", __func__, __LINE__);
+    return rc;
+  }
+
+  pthread_mutex_lock(&g_intf_lock);
+  if (NULL == g_jpeg_obj) {
+    /* mm_jpeg obj not exists, return error */
+    CDBG_ERROR("%s:%d] mm_jpeg is not opened yet", __func__, __LINE__);
+    pthread_mutex_unlock(&g_intf_lock);
+    return rc;
+  }
+
+  rc = mm_jpeg_abort_job(g_jpeg_obj, job_id);
+  pthread_mutex_unlock(&g_intf_lock);
+  return rc;
+}
+
+/** mm_jpeg_intf_close:
+ *
+ *  Arguments:
+ *    @client_hdl: client handle
+ *
+ *  Return:
+ *       0 success, failure otherwise
+ *
+ *  Description:
+ *       Close the jpeg client
+ *
+ **/
+static int32_t mm_jpeg_intf_close(uint32_t client_hdl)
+{
+  int32_t rc = -1;
+
+  if (0 == client_hdl) {
+    CDBG_ERROR("%s:%d] invalid client_hdl", __func__, __LINE__);
+    return rc;
+  }
+
+  pthread_mutex_lock(&g_intf_lock);
+  if (NULL == g_jpeg_obj) {
+    /* mm_jpeg obj not exists, return error */
+    CDBG_ERROR("%s:%d] mm_jpeg is not opened yet", __func__, __LINE__);
+    pthread_mutex_unlock(&g_intf_lock);
+    return rc;
+  }
+
+  rc = mm_jpeg_close(g_jpeg_obj, client_hdl);
+  g_jpeg_obj->num_clients--;
+  if(0 == rc) {
+    if (0 == g_jpeg_obj->num_clients) {
+      /* No client, close jpeg internally */
+      rc = mm_jpeg_deinit(g_jpeg_obj);
+      free(g_jpeg_obj);
+      g_jpeg_obj = NULL;
+    }
+  }
+
+  pthread_mutex_unlock(&g_intf_lock);
+  return rc;
+}
+
+/** jpeg_open:
+ *
+ *  Arguments:
+ *    @ops: ops table pointer
+ *    @picture_size: maximum picture dimensions, used to size the work buffer
+ *
+ *  Return:
+ *       0 failure, success otherwise
+ *
+ *  Description:
+ *       Open a jpeg client
+ *
+ **/
+uint32_t jpeg_open(mm_jpeg_ops_t *ops, mm_dimension picture_size)
+{
+  int32_t rc = 0;
+  uint32_t clnt_hdl = 0;
+  mm_jpeg_obj* jpeg_obj = NULL;
+  char prop[PROPERTY_VALUE_MAX];
+  uint32_t globalLogLevel = 0;
+
+  memset(prop, 0x0, sizeof(prop));
+  property_get("persist.camera.hal.debug", prop, "0");
+  int val = atoi(prop);
+  if (0 <= val) {
+      gMmJpegIntfLogLevel = (uint32_t)val;
+  }
+  property_get("persist.camera.global.debug", prop, "0");
+  val = atoi(prop);
+  if (0 <= val) {
+      globalLogLevel = (uint32_t)val;
+  }
+
+  /* Highest log level among hal.logs and global.logs is selected */
+  if (gMmJpegIntfLogLevel < globalLogLevel)
+      gMmJpegIntfLogLevel = globalLogLevel;
+  if (gMmJpegIntfLogLevel < MINIMUM_JPEG_LOG_LEVEL)
+      gMmJpegIntfLogLevel = MINIMUM_JPEG_LOG_LEVEL;
+
+  pthread_mutex_lock(&g_intf_lock);
+  /* first time open */
+  if(NULL == g_jpeg_obj) {
+    jpeg_obj = (mm_jpeg_obj *)malloc(sizeof(mm_jpeg_obj));
+    if(NULL == jpeg_obj) {
+      CDBG_ERROR("%s:%d] no mem", __func__, __LINE__);
+      pthread_mutex_unlock(&g_intf_lock);
+      return clnt_hdl;
+    }
+
+    /* initialize jpeg obj */
+    memset(jpeg_obj, 0, sizeof(mm_jpeg_obj));
+
+    /* used for work buf calculation */
+    jpeg_obj->max_pic_w = picture_size.w;
+    jpeg_obj->max_pic_h = picture_size.h;
+
+    rc = mm_jpeg_init(jpeg_obj);
+    if(0 != rc) {
+      CDBG_ERROR("%s:%d] mm_jpeg_init err = %d", __func__, __LINE__, rc);
+      free(jpeg_obj);
+      pthread_mutex_unlock(&g_intf_lock);
+      return clnt_hdl;
+    }
+
+    /* remember in global variable */
+    g_jpeg_obj = jpeg_obj;
+  }
+
+  /* open new client */
+  clnt_hdl = mm_jpeg_new_client(g_jpeg_obj);
+  if (clnt_hdl > 0) {
+    /* valid client */
+    if (NULL != ops) {
+      /* fill in ops tbl if ptr not NULL */
+      ops->start_job = mm_jpeg_intf_start_job;
+      ops->abort_job = mm_jpeg_intf_abort_job;
+      ops->create_session = mm_jpeg_intf_create_session;
+      ops->destroy_session = mm_jpeg_intf_destroy_session;
+      ops->close = mm_jpeg_intf_close;
+    }
+  } else {
+    /* failed new client */
+    CDBG_ERROR("%s:%d] mm_jpeg_new_client failed", __func__, __LINE__);
+
+    if (0 == g_jpeg_obj->num_clients) {
+      /* no client, close jpeg */
+      mm_jpeg_deinit(g_jpeg_obj);
+      free(g_jpeg_obj);
+      g_jpeg_obj = NULL;
+    }
+  }
+
+  pthread_mutex_unlock(&g_intf_lock);
+  return clnt_hdl;
+}
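
For orientation, here is a minimal sketch of the call order a client is expected to follow against this encoder interface: open a client, create a session through the returned ops table, submit a job, and close. It is an illustration under assumptions, not working capture code: the mm_jpeg_encode_params_t and mm_jpeg_job_t members are only hinted at in comments (their definitions live in mm_jpeg_interface.h), and the completion callback is omitted.

    #include <stdint.h>
    #include <string.h>
    #include "mm_jpeg_interface.h"

    /* Sketch only: demonstrates call order, not a complete encoder client. */
    static void encode_one_frame_sketch(void)
    {
      mm_jpeg_ops_t ops;
      mm_dimension pic_size;
      mm_jpeg_encode_params_t params;
      mm_jpeg_job_t job;
      uint32_t client_hdl, session_id = 0, job_id = 0;

      memset(&ops, 0, sizeof(ops));
      memset(&pic_size, 0, sizeof(pic_size));
      memset(&params, 0, sizeof(params));
      memset(&job, 0, sizeof(job));

      pic_size.w = 4096;  /* jpeg_open() sizes its work buffer from this */
      pic_size.h = 3072;

      client_hdl = jpeg_open(&ops, pic_size);
      if (0 == client_hdl)
        return;  /* open failed */

      /* params.src_main_buf[], params.dest_buf[], the completion callback
       * and the remaining mm_jpeg_encode_params_t fields go here. */
      if (0 == ops.create_session(client_hdl, &params, &session_id)) {
        /* fill job with the encode job description and session_id, then: */
        ops.start_job(&job, &job_id);
        /* wait for the completion callback before tearing down */
        ops.destroy_session(session_id);
      }
      ops.close(client_hdl);
    }
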
diff --git a/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpeg_ionbuf.c b/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpeg_ionbuf.c
new file mode 100644
index 0000000..e16b2f4
--- /dev/null
+++ b/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpeg_ionbuf.c
@@ -0,0 +1,165 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "mm_jpeg_ionbuf.h"
+#include <stdio.h>
+#include <string.h>
+#include <linux/msm_ion.h>
+
+/** buffer_allocate:
+ *
+ *  Arguments:
+ *     @p_buffer: ION buffer
+ *
+ *  Return:
+ *     buffer address
+ *
+ *  Description:
+ *      allocates ION buffer
+ *
+ **/
+void *buffer_allocate(buffer_t *p_buffer, int cached)
+{
+  void *l_buffer = NULL;
+
+  int lrc = 0;
+  struct ion_handle_data lhandle_data;
+
+   p_buffer->alloc.len = p_buffer->size;
+   p_buffer->alloc.align = 4096;
+   p_buffer->alloc.flags = (cached) ? ION_FLAG_CACHED : 0;
+   p_buffer->alloc.heap_id_mask = 0x1 << ION_IOMMU_HEAP_ID;
+
+   p_buffer->ion_fd = open("/dev/ion", O_RDONLY);
+   if(p_buffer->ion_fd < 0) {
+    CDBG_ERROR("%s :Ion open failed", __func__);
+    goto ION_ALLOC_FAILED;
+  }
+
+  /* Make it page size aligned */
+  p_buffer->alloc.len = (p_buffer->alloc.len + 4095U) & (~4095U);
+  lrc = ioctl(p_buffer->ion_fd, ION_IOC_ALLOC, &p_buffer->alloc);
+  if (lrc < 0) {
+    CDBG_ERROR("%s :ION allocation failed len %zu", __func__,
+      p_buffer->alloc.len);
+    goto ION_ALLOC_FAILED;
+  }
+
+  p_buffer->ion_info_fd.handle = p_buffer->alloc.handle;
+  lrc = ioctl(p_buffer->ion_fd, ION_IOC_SHARE,
+    &p_buffer->ion_info_fd);
+  if (lrc < 0) {
+    CDBG_ERROR("%s :ION map failed %s", __func__, strerror(errno));
+    goto ION_MAP_FAILED;
+  }
+
+  p_buffer->p_pmem_fd = p_buffer->ion_info_fd.fd;
+
+  l_buffer = mmap(NULL, p_buffer->alloc.len, PROT_READ  | PROT_WRITE,
+    MAP_SHARED,p_buffer->p_pmem_fd, 0);
+
+  if (l_buffer == MAP_FAILED) {
+    CDBG_ERROR("%s :ION_MMAP_FAILED: %s (%d)", __func__,
+      strerror(errno), errno);
+    goto ION_MAP_FAILED;
+  }
+
+  return l_buffer;
+
+ION_MAP_FAILED:
+  lhandle_data.handle = p_buffer->ion_info_fd.handle;
+  ioctl(p_buffer->ion_fd, ION_IOC_FREE, &lhandle_data);
+  return NULL;
+ION_ALLOC_FAILED:
+  return NULL;
+
+}
+
+/** buffer_deallocate:
+ *
+ *  Arguments:
+ *     @p_buffer: ION buffer
+ *
+ *  Return:
+ *     0 on success, error otherwise
+ *
+ *  Description:
+ *      deallocates ION buffer
+ *
+ **/
+int buffer_deallocate(buffer_t *p_buffer)
+{
+  int lrc = 0;
+  size_t lsize = (p_buffer->size + 4095U) & (~4095U);
+
+  struct ion_handle_data lhandle_data;
+  lrc = munmap(p_buffer->addr, lsize);
+
+  close(p_buffer->ion_info_fd.fd);
+
+  lhandle_data.handle = p_buffer->ion_info_fd.handle;
+  ioctl(p_buffer->ion_fd, ION_IOC_FREE, &lhandle_data);
+
+  close(p_buffer->ion_fd);
+  return lrc;
+}
+
+/** buffer_invalidate:
+ *
+ *  Arguments:
+ *     @p_buffer: ION buffer
+ *
+ *  Return:
+ *     error val
+ *
+ *  Description:
+ *      Invalidates the cached buffer
+ *
+ **/
+int buffer_invalidate(buffer_t *p_buffer)
+{
+  int lrc = 0;
+  struct ion_flush_data cache_inv_data;
+  struct ion_custom_data custom_data;
+
+  memset(&cache_inv_data, 0, sizeof(cache_inv_data));
+  memset(&custom_data, 0, sizeof(custom_data));
+  cache_inv_data.vaddr = p_buffer->addr;
+  cache_inv_data.fd = p_buffer->ion_info_fd.fd;
+  cache_inv_data.handle = p_buffer->ion_info_fd.handle;
+  cache_inv_data.length = (unsigned int)p_buffer->size;
+  custom_data.cmd = (unsigned int)ION_IOC_INV_CACHES;
+  custom_data.arg = (unsigned long)&cache_inv_data;
+
+  lrc = ioctl(p_buffer->ion_fd, ION_IOC_CUSTOM, &custom_data);
+  if (lrc < 0)
+    CDBG_ERROR("%s: Cache Invalidate failed: %s\n", __func__, strerror(errno));
+
+  return lrc;
+}
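
A short sketch of how these ION helpers fit together. One observation from the code above: buffer_allocate() returns the mapped address but does not store it, while buffer_deallocate() and buffer_invalidate() read p_buffer->addr, so the caller is expected to save the returned pointer into the buffer_t before using the other helpers.

    #include <string.h>
    #include "mm_jpeg_ionbuf.h"

    /* Sketch: allocate a cached ION buffer, use it, then release it. */
    static int ion_buffer_roundtrip(size_t len)
    {
      buffer_t buf;

      memset(&buf, 0, sizeof(buf));
      buf.size = len;                       /* rounded up to a 4K page internally */

      buf.addr = buffer_allocate(&buf, 1);  /* 1 => ION_FLAG_CACHED */
      if (NULL == buf.addr)
        return -1;

      /* ... fill buf.addr, hand buf.p_pmem_fd to the encoder ... */

      buffer_invalidate(&buf);              /* sync the CPU view of device-written data */
      return buffer_deallocate(&buf);       /* munmap + ION_IOC_FREE + close of both fds */
    }
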
diff --git a/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpeg_queue.c b/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpeg_queue.c
new file mode 100644
index 0000000..2938532
--- /dev/null
+++ b/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpeg_queue.c
@@ -0,0 +1,183 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <pthread.h>
+#include "mm_jpeg_dbg.h"
+#include "mm_jpeg.h"
+
+int32_t mm_jpeg_queue_init(mm_jpeg_queue_t* queue)
+{
+    pthread_mutex_init(&queue->lock, NULL);
+    cam_list_init(&queue->head.list);
+    queue->size = 0;
+    return 0;
+}
+
+int32_t mm_jpeg_queue_enq(mm_jpeg_queue_t* queue, mm_jpeg_q_data_t data)
+{
+    mm_jpeg_q_node_t* node =
+        (mm_jpeg_q_node_t *)malloc(sizeof(mm_jpeg_q_node_t));
+    if (NULL == node) {
+        CDBG_ERROR("%s: No memory for mm_jpeg_q_node_t", __func__);
+        return -1;
+    }
+
+    memset(node, 0, sizeof(mm_jpeg_q_node_t));
+    node->data = data;
+
+    pthread_mutex_lock(&queue->lock);
+    cam_list_add_tail_node(&node->list, &queue->head.list);
+    queue->size++;
+    pthread_mutex_unlock(&queue->lock);
+
+    return 0;
+
+}
+
+int32_t mm_jpeg_queue_enq_head(mm_jpeg_queue_t* queue, mm_jpeg_q_data_t data)
+{
+    struct cam_list *head = NULL;
+    struct cam_list *pos = NULL;
+    mm_jpeg_q_node_t* node =
+        (mm_jpeg_q_node_t *)malloc(sizeof(mm_jpeg_q_node_t));
+    if (NULL == node) {
+        CDBG_ERROR("%s: No memory for mm_jpeg_q_node_t", __func__);
+        return -1;
+    }
+
+    memset(node, 0, sizeof(mm_jpeg_q_node_t));
+    node->data = data;
+
+    pthread_mutex_lock(&queue->lock);
+    /* read the current head inside the lock so the insert position
+     * cannot race with a concurrent enqueue/dequeue */
+    head = &queue->head.list;
+    pos = head->next;
+    cam_list_insert_before_node(&node->list, pos);
+    queue->size++;
+    pthread_mutex_unlock(&queue->lock);
+
+    return 0;
+}
+
+mm_jpeg_q_data_t mm_jpeg_queue_deq(mm_jpeg_queue_t* queue)
+{
+    mm_jpeg_q_data_t data;
+    mm_jpeg_q_node_t* node = NULL;
+    struct cam_list *head = NULL;
+    struct cam_list *pos = NULL;
+
+    memset(&data, 0, sizeof(data));
+
+    pthread_mutex_lock(&queue->lock);
+    head = &queue->head.list;
+    pos = head->next;
+    if (pos != head) {
+        node = member_of(pos, mm_jpeg_q_node_t, list);
+        cam_list_del_node(&node->list);
+        queue->size--;
+    }
+    pthread_mutex_unlock(&queue->lock);
+
+    if (NULL != node) {
+        data = node->data;
+        free(node);
+    }
+
+    return data;
+}
+
+uint32_t mm_jpeg_queue_get_size(mm_jpeg_queue_t* queue)
+{
+    uint32_t size = 0;
+
+    pthread_mutex_lock(&queue->lock);
+    size = queue->size;
+    pthread_mutex_unlock(&queue->lock);
+
+    return size;
+
+}
+
+int32_t mm_jpeg_queue_deinit(mm_jpeg_queue_t* queue)
+{
+    mm_jpeg_queue_flush(queue);
+    pthread_mutex_destroy(&queue->lock);
+    return 0;
+}
+
+int32_t mm_jpeg_queue_flush(mm_jpeg_queue_t* queue)
+{
+    mm_jpeg_q_node_t* node = NULL;
+    struct cam_list *head = NULL;
+    struct cam_list *pos = NULL;
+
+    pthread_mutex_lock(&queue->lock);
+    head = &queue->head.list;
+    pos = head->next;
+
+    while(pos != head) {
+        node = member_of(pos, mm_jpeg_q_node_t, list);
+        pos = pos->next; /* advance before the node is unlinked and freed */
+        cam_list_del_node(&node->list);
+        queue->size--;
+
+        /* for now we only assume there is no ptr inside data
+         * so we free data directly */
+        if (NULL != node->data.p) {
+            free(node->data.p);
+        }
+        free(node);
+    }
+    queue->size = 0;
+    pthread_mutex_unlock(&queue->lock);
+    return 0;
+}
+
+mm_jpeg_q_data_t mm_jpeg_queue_peek(mm_jpeg_queue_t* queue)
+{
+    mm_jpeg_q_data_t data;
+    mm_jpeg_q_node_t* node = NULL;
+    struct cam_list *head = NULL;
+    struct cam_list *pos = NULL;
+
+    memset(&data, 0, sizeof(data));
+
+    pthread_mutex_lock(&queue->lock);
+    head = &queue->head.list;
+    pos = head->next;
+    if (pos != head) {
+        node = member_of(pos, mm_jpeg_q_node_t, list);
+    }
+    pthread_mutex_unlock(&queue->lock);
+
+    if (NULL != node) {
+        data = node->data;
+    }
+    return data;
+}
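
The queue is a mutex-protected cam_list of mm_jpeg_q_node_t wrappers around an mm_jpeg_q_data_t payload (the .p pointer). A minimal produce/consume sketch follows; note that mm_jpeg_queue_flush(), and therefore mm_jpeg_queue_deinit(), frees any payload still attached to a node, so ownership of an enqueued pointer stays with the queue until it is dequeued.

    #include <stdlib.h>
    #include <string.h>
    #include "mm_jpeg.h"

    /* Sketch: basic produce/consume against the queue helpers above. */
    static void queue_usage_sketch(void)
    {
      mm_jpeg_queue_t q;
      mm_jpeg_q_data_t qdata;

      mm_jpeg_queue_init(&q);

      memset(&qdata, 0, sizeof(qdata));
      qdata.p = malloc(64);                 /* payload; freed by flush if never dequeued */
      mm_jpeg_queue_enq(&q, qdata);

      if (mm_jpeg_queue_get_size(&q) > 0) {
        mm_jpeg_q_data_t out = mm_jpeg_queue_deq(&q);
        free(out.p);                        /* dequeued payloads belong to the caller */
      }

      mm_jpeg_queue_deinit(&q);             /* flushes the queue and destroys the lock */
    }
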
diff --git a/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpegdec.c b/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpegdec.c
new file mode 100644
index 0000000..117eb3b
--- /dev/null
+++ b/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpegdec.c
@@ -0,0 +1,1192 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <pthread.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <poll.h>
+
+#include "mm_jpeg_dbg.h"
+#include "mm_jpeg_interface.h"
+#include "mm_jpeg.h"
+#include "mm_jpeg_inlines.h"
+
+OMX_ERRORTYPE mm_jpegdec_ebd(OMX_HANDLETYPE hComponent,
+  OMX_PTR pAppData,
+  OMX_BUFFERHEADERTYPE *pBuffer);
+OMX_ERRORTYPE mm_jpegdec_fbd(OMX_HANDLETYPE hComponent,
+    OMX_PTR pAppData,
+    OMX_BUFFERHEADERTYPE* pBuffer);
+OMX_ERRORTYPE mm_jpegdec_event_handler(OMX_HANDLETYPE hComponent,
+    OMX_PTR pAppData,
+    OMX_EVENTTYPE eEvent,
+    OMX_U32 nData1,
+    OMX_U32 nData2,
+    OMX_PTR pEventData);
+
+
+/** mm_jpegdec_destroy_job
+ *
+ *  Arguments:
+ *    @p_session: Session obj
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       Destroy the job based paramenters
+ *
+ **/
+static int32_t mm_jpegdec_destroy_job(mm_jpeg_job_session_t *p_session)
+{
+  int32_t rc = 0;
+
+  return rc;
+}
+
+/** mm_jpeg_job_done:
+ *
+ *  Arguments:
+ *    @p_session: decode session
+ *
+ *  Return:
+ *       OMX_ERRORTYPE
+ *
+ *  Description:
+ *       Finalize the job
+ *
+ **/
+static void mm_jpegdec_job_done(mm_jpeg_job_session_t *p_session)
+{
+  mm_jpeg_obj *my_obj = (mm_jpeg_obj *)p_session->jpeg_obj;
+  mm_jpeg_job_q_node_t *node = NULL;
+
+  /*Destroy job related params*/
+  mm_jpegdec_destroy_job(p_session);
+
+  /*remove the job*/
+  node = mm_jpeg_queue_remove_job_by_job_id(&my_obj->ongoing_job_q,
+    p_session->jobId);
+  if (node) {
+    free(node);
+  }
+  p_session->encoding = OMX_FALSE;
+
+  /* wake up jobMgr thread to work on new job if there is any */
+  cam_sem_post(&my_obj->job_mgr.job_sem);
+}
+
+
+/** mm_jpegdec_session_send_buffers:
+ *
+ *  Arguments:
+ *    @data: job session
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *       Send the buffers to OMX layer
+ *
+ **/
+OMX_ERRORTYPE mm_jpegdec_session_send_buffers(void *data)
+{
+  uint32_t i = 0;
+  mm_jpeg_job_session_t* p_session = (mm_jpeg_job_session_t *)data;
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  QOMX_BUFFER_INFO lbuffer_info;
+  mm_jpeg_decode_params_t *p_params = &p_session->dec_params;
+
+  memset(&lbuffer_info, 0x0, sizeof(QOMX_BUFFER_INFO));
+  for (i = 0; i < p_params->num_src_bufs; i++) {
+    CDBG("%s:%d] Source buffer %d", __func__, __LINE__, i);
+    lbuffer_info.fd = (OMX_U32)p_params->src_main_buf[i].fd;
+    ret = OMX_UseBuffer(p_session->omx_handle, &(p_session->p_in_omx_buf[i]), 0,
+      &lbuffer_info, p_params->src_main_buf[i].buf_size,
+      p_params->src_main_buf[i].buf_vaddr);
+    if (ret) {
+      CDBG_ERROR("%s:%d] Error %d", __func__, __LINE__, ret);
+      return ret;
+    }
+  }
+
+  CDBG("%s:%d]", __func__, __LINE__);
+  return ret;
+}
+
+/** mm_jpegdec_session_free_buffers:
+ *
+ *  Arguments:
+ *    @data: job session
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *       Free the buffers from OMX layer
+ *
+ **/
+OMX_ERRORTYPE mm_jpegdec_session_free_buffers(void *data)
+{
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  uint32_t i = 0;
+  mm_jpeg_job_session_t* p_session = (mm_jpeg_job_session_t *)data;
+  mm_jpeg_decode_params_t *p_params = &p_session->dec_params;
+
+  for (i = 0; i < p_params->num_src_bufs; i++) {
+    CDBG("%s:%d] Source buffer %d", __func__, __LINE__, i);
+    ret = OMX_FreeBuffer(p_session->omx_handle, 0, p_session->p_in_omx_buf[i]);
+    if (ret) {
+      CDBG_ERROR("%s:%d] Error %d", __func__, __LINE__, ret);
+      return ret;
+    }
+  }
+
+  for (i = 0; i < p_params->num_dst_bufs; i++) {
+    CDBG("%s:%d] Dest buffer %d", __func__, __LINE__, i);
+    ret = OMX_FreeBuffer(p_session->omx_handle, 1, p_session->p_out_omx_buf[i]);
+    if (ret) {
+      CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+      return ret;
+    }
+  }
+  CDBG("%s:%d]", __func__, __LINE__);
+  return ret;
+}
+
+/** mm_jpegdec_session_create:
+ *
+ *  Arguments:
+ *    @p_session: job session
+ *
+ *  Return:
+ *       OMX error types
+ *
+ *  Description:
+ *       Create a jpeg decode session
+ *
+ **/
+OMX_ERRORTYPE mm_jpegdec_session_create(mm_jpeg_job_session_t* p_session)
+{
+  OMX_ERRORTYPE rc = OMX_ErrorNone;
+
+  pthread_mutex_init(&p_session->lock, NULL);
+  pthread_cond_init(&p_session->cond, NULL);
+  cirq_reset(&p_session->cb_q);
+  p_session->state_change_pending = OMX_FALSE;
+  p_session->abort_state = MM_JPEG_ABORT_NONE;
+  p_session->error_flag = OMX_ErrorNone;
+  p_session->ebd_count = 0;
+  p_session->fbd_count = 0;
+  p_session->encode_pid = -1;
+  p_session->config = OMX_FALSE;
+
+  p_session->omx_callbacks.EmptyBufferDone = mm_jpegdec_ebd;
+  p_session->omx_callbacks.FillBufferDone = mm_jpegdec_fbd;
+  p_session->omx_callbacks.EventHandler = mm_jpegdec_event_handler;
+  p_session->exif_count_local = 0;
+
+  rc = OMX_GetHandle(&p_session->omx_handle,
+    "OMX.qcom.image.jpeg.decoder",
+    (void *)p_session,
+    &p_session->omx_callbacks);
+
+  if (OMX_ErrorNone != rc) {
+    CDBG_ERROR("%s:%d] OMX_GetHandle failed (%d)", __func__, __LINE__, rc);
+    return rc;
+  }
+  return rc;
+}
+
+/** mm_jpegdec_session_destroy:
+ *
+ *  Arguments:
+ *    @p_session: job session
+ *
+ *  Return:
+ *       none
+ *
+ *  Description:
+ *       Destroy a jpeg decode session
+ *
+ **/
+void mm_jpegdec_session_destroy(mm_jpeg_job_session_t* p_session)
+{
+  OMX_ERRORTYPE rc = OMX_ErrorNone;
+
+  CDBG("%s:%d] E", __func__, __LINE__);
+  if (NULL == p_session->omx_handle) {
+    CDBG_ERROR("%s:%d] invalid handle", __func__, __LINE__);
+    return;
+  }
+
+  rc = mm_jpeg_session_change_state(p_session, OMX_StateIdle, NULL);
+  if (rc) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+  }
+
+  rc = mm_jpeg_session_change_state(p_session, OMX_StateLoaded,
+    mm_jpegdec_session_free_buffers);
+  if (rc) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+  }
+
+  rc = OMX_FreeHandle(p_session->omx_handle);
+  if (0 != rc) {
+    CDBG_ERROR("%s:%d] OMX_FreeHandle failed (%d)", __func__, __LINE__, rc);
+  }
+  p_session->omx_handle = NULL;
+
+
+  pthread_mutex_destroy(&p_session->lock);
+  pthread_cond_destroy(&p_session->cond);
+  CDBG("%s:%d] X", __func__, __LINE__);
+}
+
+/** mm_jpegdec_session_config_ports:
+ *
+ *  Arguments:
+ *    @p_session: job session
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *       Configure OMX ports
+ *
+ **/
+OMX_ERRORTYPE mm_jpegdec_session_config_ports(mm_jpeg_job_session_t* p_session)
+{
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  mm_jpeg_decode_params_t *p_params = &p_session->dec_params;
+  mm_jpeg_decode_job_t *p_jobparams = &p_session->decode_job;
+
+  mm_jpeg_buf_t *p_src_buf =
+    &p_params->src_main_buf[p_jobparams->src_index];
+
+  p_session->inputPort.nPortIndex = 0;
+  p_session->outputPort.nPortIndex = 1;
+
+
+  ret = OMX_GetParameter(p_session->omx_handle, OMX_IndexParamPortDefinition,
+    &p_session->inputPort);
+  if (ret) {
+    CDBG_ERROR("%s:%d] failed", __func__, __LINE__);
+    return ret;
+  }
+
+  ret = OMX_GetParameter(p_session->omx_handle, OMX_IndexParamPortDefinition,
+    &p_session->outputPort);
+  if (ret) {
+    CDBG_ERROR("%s:%d] failed", __func__, __LINE__);
+    return ret;
+  }
+
+  p_session->inputPort.format.image.nFrameWidth =
+    (OMX_U32)p_jobparams->main_dim.src_dim.width;
+  p_session->inputPort.format.image.nFrameHeight =
+    (OMX_U32)p_jobparams->main_dim.src_dim.height;
+  p_session->inputPort.format.image.nStride =
+    p_src_buf->offset.mp[0].stride;
+  p_session->inputPort.format.image.nSliceHeight =
+    (OMX_U32)p_src_buf->offset.mp[0].scanline;
+  p_session->inputPort.format.image.eColorFormat =
+    map_jpeg_format(p_params->color_format);
+  p_session->inputPort.nBufferSize =
+    p_params->src_main_buf[p_jobparams->src_index].buf_size;
+  p_session->inputPort.nBufferCountActual = (OMX_U32)p_params->num_src_bufs;
+  ret = OMX_SetParameter(p_session->omx_handle, OMX_IndexParamPortDefinition,
+    &p_session->inputPort);
+  if (ret) {
+    CDBG_ERROR("%s:%d] failed", __func__, __LINE__);
+    return ret;
+  }
+
+  return ret;
+}
+
+
+/** mm_jpegdec_session_config_main:
+ *
+ *  Arguments:
+ *    @p_session: job session
+ *
+ *  Return:
+ *       OMX error values
+ *
+ *  Description:
+ *       Configure main image
+ *
+ **/
+OMX_ERRORTYPE mm_jpegdec_session_config_main(mm_jpeg_job_session_t *p_session)
+{
+  OMX_ERRORTYPE rc = OMX_ErrorNone;
+
+  /* config port */
+  CDBG("%s:%d] config port", __func__, __LINE__);
+  rc = mm_jpegdec_session_config_ports(p_session);
+  if (OMX_ErrorNone != rc) {
+    CDBG_ERROR("%s: config port failed", __func__);
+    return rc;
+  }
+
+
+  /* TODO: config crop */
+
+  return rc;
+}
+
+/** mm_jpegdec_session_configure:
+ *
+ *  Arguments:
+ *    @p_session: decode session
+ *
+ *  Return:
+ *       OMX_ERRORTYPE
+ *
+ *  Description:
+ *       Configure the session
+ *
+ **/
+static OMX_ERRORTYPE mm_jpegdec_session_configure(mm_jpeg_job_session_t *p_session)
+{
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+
+  CDBG("%s:%d] E ", __func__, __LINE__);
+
+  MM_JPEG_CHK_ABORT(p_session, ret, error);
+
+  /* config main img */
+  ret = mm_jpegdec_session_config_main(p_session);
+  if (OMX_ErrorNone != ret) {
+    CDBG_ERROR("%s:%d] config main img failed", __func__, __LINE__);
+    goto error;
+  }
+
+  /* TODO: common config (if needed) */
+
+  ret = mm_jpeg_session_change_state(p_session, OMX_StateIdle,
+    mm_jpegdec_session_send_buffers);
+  if (ret) {
+    CDBG_ERROR("%s:%d] change state to idle failed %d",
+      __func__, __LINE__, ret);
+    goto error;
+  }
+
+  ret = mm_jpeg_session_change_state(p_session, OMX_StateExecuting,
+    NULL);
+  if (ret) {
+    CDBG_ERROR("%s:%d] change state to executing failed %d",
+      __func__, __LINE__, ret);
+    goto error;
+  }
+
+error:
+  CDBG("%s:%d] X ret %d", __func__, __LINE__, ret);
+  return ret;
+}
+
+static OMX_ERRORTYPE mm_jpeg_session_port_enable(
+    mm_jpeg_job_session_t *p_session,
+    OMX_U32 nPortIndex,
+    OMX_BOOL wait)
+{
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  OMX_EVENTTYPE lEvent;
+
+  pthread_mutex_lock(&p_session->lock);
+  p_session->event_pending = OMX_TRUE;
+  pthread_mutex_unlock(&p_session->lock);
+
+  ret = OMX_SendCommand(p_session->omx_handle, OMX_CommandPortEnable,
+      nPortIndex, NULL);
+
+  if (ret) {
+    CDBG_ERROR("%s:%d] failed", __func__, __LINE__);
+    return ret;
+  }
+
+  if (wait == OMX_TRUE) {
+    // Wait for cmd complete
+    pthread_mutex_lock(&p_session->lock);
+    if (p_session->event_pending == OMX_TRUE) {
+      CDBG("%s:%d] before wait", __func__, __LINE__);
+      pthread_cond_wait(&p_session->cond, &p_session->lock);
+      lEvent = p_session->omxEvent;
+      CDBG("%s:%d] after wait", __func__, __LINE__);
+    }
+    lEvent = p_session->omxEvent;
+    pthread_mutex_unlock(&p_session->lock);
+
+    if (lEvent != OMX_EventCmdComplete) {
+      CDBG("%s:%d] Unexpected event %d", __func__, __LINE__,lEvent);
+      return OMX_ErrorUndefined;
+    }
+  }
+  return OMX_ErrorNone;
+}
+
+static OMX_ERRORTYPE mm_jpeg_session_port_disable(
+    mm_jpeg_job_session_t *p_session,
+    OMX_U32 nPortIndex,
+    OMX_BOOL wait)
+{
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  OMX_EVENTTYPE lEvent;
+
+  pthread_mutex_lock(&p_session->lock);
+  p_session->event_pending = OMX_TRUE;
+  pthread_mutex_unlock(&p_session->lock);
+
+  ret = OMX_SendCommand(p_session->omx_handle, OMX_CommandPortDisable,
+      nPortIndex, NULL);
+
+  if (ret) {
+    CDBG_ERROR("%s:%d] failed", __func__, __LINE__);
+    return ret;
+  }
+  if (wait == OMX_TRUE) {
+    // Wait for cmd complete
+    pthread_mutex_lock(&p_session->lock);
+    if (p_session->event_pending == OMX_TRUE) {
+      CDBG("%s:%d] before wait", __func__, __LINE__);
+      pthread_cond_wait(&p_session->cond, &p_session->lock);
+
+      CDBG("%s:%d] after wait", __func__, __LINE__);
+    }
+    lEvent = p_session->omxEvent;
+    pthread_mutex_unlock(&p_session->lock);
+
+    if (lEvent != OMX_EventCmdComplete) {
+      CDBG("%s:%d] Unexpected event %d", __func__, __LINE__,lEvent);
+      return OMX_ErrorUndefined;
+    }
+  }
+  return OMX_ErrorNone;
+}
+
+
+/** mm_jpegdec_session_decode:
+ *
+ *  Arguments:
+ *    @p_session: decode session
+ *
+ *  Return:
+ *       OMX_ERRORTYPE
+ *
+ *  Description:
+ *       Start the decoding
+ *
+ **/
+static OMX_ERRORTYPE mm_jpegdec_session_decode(mm_jpeg_job_session_t *p_session)
+{
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  mm_jpeg_decode_params_t *p_params = &p_session->dec_params;
+  mm_jpeg_decode_job_t *p_jobparams = &p_session->decode_job;
+  OMX_EVENTTYPE lEvent;
+  uint32_t i;
+  QOMX_BUFFER_INFO lbuffer_info;
+
+  pthread_mutex_lock(&p_session->lock);
+  p_session->abort_state = MM_JPEG_ABORT_NONE;
+  p_session->encoding = OMX_FALSE;
+  pthread_mutex_unlock(&p_session->lock);
+
+  if (OMX_FALSE == p_session->config) {
+    ret = mm_jpegdec_session_configure(p_session);
+    if (ret) {
+      CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+      goto error;
+    }
+    p_session->config = OMX_TRUE;
+  }
+
+  pthread_mutex_lock(&p_session->lock);
+  p_session->encoding = OMX_TRUE;
+  pthread_mutex_unlock(&p_session->lock);
+
+  MM_JPEG_CHK_ABORT(p_session, ret, error);
+
+  p_session->event_pending = OMX_TRUE;
+
+  ret = OMX_EmptyThisBuffer(p_session->omx_handle,
+    p_session->p_in_omx_buf[p_jobparams->src_index]);
+  if (ret) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+    goto error;
+  }
+
+  // Wait for port settings changed
+  pthread_mutex_lock(&p_session->lock);
+  if (p_session->event_pending == OMX_TRUE) {
+    CDBG("%s:%d] before wait", __func__, __LINE__);
+    pthread_cond_wait(&p_session->cond, &p_session->lock);
+  }
+  lEvent = p_session->omxEvent;
+  CDBG("%s:%d] after wait", __func__, __LINE__);
+  pthread_mutex_unlock(&p_session->lock);
+
+  if (lEvent != OMX_EventPortSettingsChanged) {
+    CDBG("%s:%d] Unexpected event %d", __func__, __LINE__,lEvent);
+    goto error;
+  }
+
+  // Disable output port (wait)
+  mm_jpeg_session_port_disable(p_session,
+      p_session->outputPort.nPortIndex,
+      OMX_TRUE);
+
+  // Get port definition
+  ret = OMX_GetParameter(p_session->omx_handle, OMX_IndexParamPortDefinition,
+      &p_session->outputPort);
+  if (ret) {
+    CDBG_ERROR("%s:%d] failed", __func__, __LINE__);
+    return ret;
+  }
+
+  // Set port definition
+  p_session->outputPort.format.image.nFrameWidth =
+    (OMX_U32)p_jobparams->main_dim.dst_dim.width;
+  p_session->outputPort.format.image.nFrameHeight =
+    (OMX_U32)p_jobparams->main_dim.dst_dim.height;
+  p_session->outputPort.format.image.eColorFormat =
+    map_jpeg_format(p_params->color_format);
+
+  p_session->outputPort.nBufferSize =
+    p_params->dest_buf[p_jobparams->dst_index].buf_size;
+  p_session->outputPort.nBufferCountActual = (OMX_U32)p_params->num_dst_bufs;
+
+  p_session->outputPort.format.image.nSliceHeight = (OMX_U32)
+    p_params->dest_buf[p_jobparams->dst_index].offset.mp[0].scanline;
+  p_session->outputPort.format.image.nStride =
+    p_params->dest_buf[p_jobparams->dst_index].offset.mp[0].stride;
+
+  ret = OMX_SetParameter(p_session->omx_handle, OMX_IndexParamPortDefinition,
+    &p_session->outputPort);
+  if (ret) {
+    CDBG_ERROR("%s:%d] failed", __func__, __LINE__);
+    return ret;
+  }
+
+  // Enable port (no wait)
+  mm_jpeg_session_port_enable(p_session,
+      p_session->outputPort.nPortIndex,
+      OMX_FALSE);
+
+  memset(&lbuffer_info, 0x0, sizeof(QOMX_BUFFER_INFO));
+  // Use buffers
+  for (i = 0; i < p_params->num_dst_bufs; i++) {
+    lbuffer_info.fd = (OMX_U32)p_params->dest_buf[i].fd;
+    CDBG("%s:%d] Dest buffer %d", __func__, __LINE__, (unsigned int)i);
+    ret = OMX_UseBuffer(p_session->omx_handle, &(p_session->p_out_omx_buf[i]),
+        1, &lbuffer_info, p_params->dest_buf[i].buf_size,
+        p_params->dest_buf[i].buf_vaddr);
+    if (ret) {
+      CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+      return ret;
+    }
+  }
+
+  // Wait for port enable completion
+  pthread_mutex_lock(&p_session->lock);
+  if (p_session->event_pending == OMX_TRUE) {
+    CDBG("%s:%d] before wait", __func__, __LINE__);
+    pthread_cond_wait(&p_session->cond, &p_session->lock);
+    lEvent = p_session->omxEvent;
+    CDBG("%s:%d] after wait", __func__, __LINE__);
+  }
+  lEvent = p_session->omxEvent;
+  pthread_mutex_unlock(&p_session->lock);
+
+  if (lEvent != OMX_EventCmdComplete) {
+    CDBG("%s:%d] Unexpected event %d", __func__, __LINE__,lEvent);
+    goto error;
+  }
+
+  ret = OMX_FillThisBuffer(p_session->omx_handle,
+    p_session->p_out_omx_buf[p_jobparams->dst_index]);
+  if (ret) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+    goto error;
+  }
+
+  MM_JPEG_CHK_ABORT(p_session, ret, error);
+
+error:
+
+  CDBG("%s:%d] X ", __func__, __LINE__);
+  return ret;
+}
+
+/** mm_jpegdec_process_decoding_job:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg client
+ *    @job_node: job node
+ *
+ *  Return:
+ *       0 for success -1 otherwise
+ *
+ *  Description:
+ *       Start the decoding job
+ *
+ **/
+int32_t mm_jpegdec_process_decoding_job(mm_jpeg_obj *my_obj, mm_jpeg_job_q_node_t* job_node)
+{
+  mm_jpeg_q_data_t qdata;
+  int32_t rc = 0;
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  mm_jpeg_job_session_t *p_session = NULL;
+
+  /* check if valid session */
+  p_session = mm_jpeg_get_session(my_obj, job_node->dec_info.job_id);
+  if (NULL == p_session) {
+    CDBG_ERROR("%s:%d] invalid job id %x", __func__, __LINE__,
+      job_node->dec_info.job_id);
+    return -1;
+  }
+
+  /* sent encode cmd to OMX, queue job into ongoing queue */
+  qdata.p = job_node;
+  rc = mm_jpeg_queue_enq(&my_obj->ongoing_job_q, qdata);
+  if (rc) {
+    CDBG_ERROR("%s:%d] jpeg enqueue failed %d",
+      __func__, __LINE__, ret);
+    goto error;
+  }
+
+  p_session->decode_job = job_node->dec_info.decode_job;
+  p_session->jobId = job_node->dec_info.job_id;
+  ret = mm_jpegdec_session_decode(p_session);
+  if (ret) {
+    CDBG_ERROR("%s:%d] encode session failed", __func__, __LINE__);
+    goto error;
+  }
+
+  CDBG("%s:%d] Success X ", __func__, __LINE__);
+  return rc;
+
+error:
+
+  if ((OMX_ErrorNone != ret) &&
+    (NULL != p_session->dec_params.jpeg_cb)) {
+    p_session->job_status = JPEG_JOB_STATUS_ERROR;
+    CDBG("%s:%d] send jpeg error callback %d", __func__, __LINE__,
+      p_session->job_status);
+    p_session->dec_params.jpeg_cb(p_session->job_status,
+      p_session->client_hdl,
+      p_session->jobId,
+      NULL,
+      p_session->dec_params.userdata);
+  }
+
+  /*remove the job*/
+  mm_jpegdec_job_done(p_session);
+  CDBG("%s:%d] Error X ", __func__, __LINE__);
+
+  return rc;
+}
+
+/** mm_jpegdec_start_decode_job:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *    @job: pointer to decode job
+ *    @job_id: job id (output)
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       Enqueue the decode job
+ *
+ **/
+int32_t mm_jpegdec_start_decode_job(mm_jpeg_obj *my_obj,
+  mm_jpeg_job_t *job,
+  uint32_t *job_id)
+{
+  mm_jpeg_q_data_t qdata;
+  int32_t rc = -1;
+  uint8_t session_idx = 0;
+  uint8_t client_idx = 0;
+  mm_jpeg_job_q_node_t* node = NULL;
+  mm_jpeg_job_session_t *p_session = NULL;
+  mm_jpeg_decode_job_t *p_jobparams  = &job->decode_job;
+
+  *job_id = 0;
+
+  /* check if valid session */
+  session_idx = GET_SESSION_IDX(p_jobparams->session_id);
+  client_idx = GET_CLIENT_IDX(p_jobparams->session_id);
+  CDBG("%s:%d] session_idx %d client idx %d", __func__, __LINE__,
+    session_idx, client_idx);
+
+  if ((session_idx >= MM_JPEG_MAX_SESSION) ||
+    (client_idx >= MAX_JPEG_CLIENT_NUM)) {
+    CDBG_ERROR("%s:%d] invalid session id %x", __func__, __LINE__,
+      job->decode_job.session_id);
+    return rc;
+  }
+
+  p_session = &my_obj->clnt_mgr[client_idx].session[session_idx];
+  if (OMX_FALSE == p_session->active) {
+    CDBG_ERROR("%s:%d] session not active %x", __func__, __LINE__,
+      job->decode_job.session_id);
+    return rc;
+  }
+
+  if ((p_jobparams->src_index >= (int32_t)p_session->dec_params.num_src_bufs) ||
+    (p_jobparams->dst_index >= (int32_t)p_session->dec_params.num_dst_bufs)) {
+    CDBG_ERROR("%s:%d] invalid buffer indices", __func__, __LINE__);
+    return rc;
+  }
+
+  /* enqueue new job into todo job queue */
+  node = (mm_jpeg_job_q_node_t *)malloc(sizeof(mm_jpeg_job_q_node_t));
+  if (NULL == node) {
+    CDBG_ERROR("%s: No memory for mm_jpeg_job_q_node_t", __func__);
+    return -1;
+  }
+
+  *job_id = job->decode_job.session_id |
+    ((p_session->job_hist++ % JOB_HIST_MAX) << 16);
+
+  memset(node, 0, sizeof(mm_jpeg_job_q_node_t));
+  node->dec_info.decode_job = job->decode_job;
+  node->dec_info.job_id = *job_id;
+  node->dec_info.client_handle = p_session->client_hdl;
+  node->type = MM_JPEG_CMD_TYPE_DECODE_JOB;
+
+  qdata.p = node;
+  rc = mm_jpeg_queue_enq(&my_obj->job_mgr.job_queue, qdata);
+  if (0 == rc) {
+    cam_sem_post(&my_obj->job_mgr.job_sem);
+  }
+
+  return rc;
+}
+
+/** mm_jpegdec_create_session:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *    @client_hdl: client handle
+ *    @p_params: pointer to decode params
+ *    @p_session_id: session id
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       Create the decoding session
+ *
+ **/
+int32_t mm_jpegdec_create_session(mm_jpeg_obj *my_obj,
+  uint32_t client_hdl,
+  mm_jpeg_decode_params_t *p_params,
+  uint32_t* p_session_id)
+{
+  int32_t rc = 0;
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  uint8_t clnt_idx = 0;
+  int session_idx = -1;
+  mm_jpeg_job_session_t *p_session = NULL;
+  *p_session_id = 0;
+
+  /* validate the parameters */
+  if ((p_params->num_src_bufs > MM_JPEG_MAX_BUF)
+    || (p_params->num_dst_bufs > MM_JPEG_MAX_BUF)) {
+    CDBG_ERROR("%s:%d] invalid num buffers", __func__, __LINE__);
+    return rc;
+  }
+
+  /* check if valid client */
+  clnt_idx = mm_jpeg_util_get_index_by_handler(client_hdl);
+  if (clnt_idx >= MAX_JPEG_CLIENT_NUM) {
+    CDBG_ERROR("%s: invalid client with handler (%d)", __func__, client_hdl);
+    return rc;
+  }
+
+  session_idx = mm_jpeg_get_new_session_idx(my_obj, clnt_idx, &p_session);
+  if (session_idx < 0) {
+    CDBG_ERROR("%s:%d] invalid session id (%d)", __func__, __LINE__, session_idx);
+    return rc;
+  }
+
+  ret = mm_jpegdec_session_create(p_session);
+  if (OMX_ErrorNone != ret) {
+    p_session->active = OMX_FALSE;
+    CDBG_ERROR("%s:%d] jpeg session create failed", __func__, __LINE__);
+    return rc;
+  }
+
+  *p_session_id = (JOB_ID_MAGICVAL << 24) |
+    ((unsigned)session_idx << 8) | clnt_idx;
+
+  /*copy the params*/
+  p_session->dec_params = *p_params;
+  p_session->client_hdl = client_hdl;
+  p_session->sessionId = *p_session_id;
+  p_session->jpeg_obj = (void*)my_obj; /* save a ptr to jpeg_obj */
+  CDBG("%s:%d] session id %x", __func__, __LINE__, *p_session_id);
+
+  return rc;
+}
+
+/** mm_jpegdec_destroy_session:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *    @p_session: session object
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       Destroy the decoding session
+ *
+ **/
+int32_t mm_jpegdec_destroy_session(mm_jpeg_obj *my_obj,
+  mm_jpeg_job_session_t *p_session)
+{
+  int32_t rc = 0;
+  mm_jpeg_job_q_node_t *node = NULL;
+
+  if (NULL == p_session) {
+    CDBG_ERROR("%s:%d] invalid session", __func__, __LINE__);
+    return rc;
+  }
+  uint32_t session_id = p_session->sessionId;
+  pthread_mutex_lock(&my_obj->job_lock);
+
+  /* abort job if in todo queue */
+  CDBG("%s:%d] abort todo jobs", __func__, __LINE__);
+  node = mm_jpeg_queue_remove_job_by_session_id(&my_obj->job_mgr.job_queue, session_id);
+  while (NULL != node) {
+    free(node);
+    node = mm_jpeg_queue_remove_job_by_session_id(&my_obj->job_mgr.job_queue, session_id);
+  }
+
+  /* abort job if in ongoing queue */
+  CDBG("%s:%d] abort ongoing jobs", __func__, __LINE__);
+  node = mm_jpeg_queue_remove_job_by_session_id(&my_obj->ongoing_job_q, session_id);
+  while (NULL != node) {
+    free(node);
+    node = mm_jpeg_queue_remove_job_by_session_id(&my_obj->ongoing_job_q, session_id);
+  }
+
+  /* abort the current session */
+  mm_jpeg_session_abort(p_session);
+  mm_jpegdec_session_destroy(p_session);
+  mm_jpeg_remove_session_idx(my_obj, session_id);
+  pthread_mutex_unlock(&my_obj->job_lock);
+
+  /* wake up jobMgr thread to work on new job if there is any */
+  cam_sem_post(&my_obj->job_mgr.job_sem);
+  CDBG("%s:%d] X", __func__, __LINE__);
+
+  return rc;
+}
+
+/** mm_jpegdec_destroy_session_by_id:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *    @session_id: session index
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       Destroy the decoding session
+ *
+ **/
+int32_t mm_jpegdec_destroy_session_by_id(mm_jpeg_obj *my_obj, uint32_t session_id)
+{
+  int32_t rc = 0;
+  mm_jpeg_job_session_t *p_session = mm_jpeg_get_session(my_obj, session_id);
+
+  if (NULL == p_session) {
+    CDBG_ERROR("%s:%d] session is not valid", __func__, __LINE__);
+    return rc;
+  }
+
+  return mm_jpegdec_destroy_session(my_obj, p_session);
+}
+
+
+
+OMX_ERRORTYPE mm_jpegdec_ebd(OMX_HANDLETYPE hComponent,
+  OMX_PTR pAppData,
+  OMX_BUFFERHEADERTYPE *pBuffer)
+{
+  mm_jpeg_job_session_t *p_session = (mm_jpeg_job_session_t *) pAppData;
+
+  CDBG("%s:%d] count %d ", __func__, __LINE__, p_session->ebd_count);
+  pthread_mutex_lock(&p_session->lock);
+  p_session->ebd_count++;
+  pthread_mutex_unlock(&p_session->lock);
+  return 0;
+}
+
+OMX_ERRORTYPE mm_jpegdec_fbd(OMX_HANDLETYPE hComponent,
+  OMX_PTR pAppData,
+  OMX_BUFFERHEADERTYPE *pBuffer)
+{
+  OMX_ERRORTYPE ret = OMX_ErrorNone;
+  mm_jpeg_job_session_t *p_session = (mm_jpeg_job_session_t *) pAppData;
+  mm_jpeg_output_t output_buf;
+
+  CDBG("%s:%d] count %d ", __func__, __LINE__, p_session->fbd_count);
+
+  pthread_mutex_lock(&p_session->lock);
+
+  if (MM_JPEG_ABORT_NONE != p_session->abort_state) {
+    pthread_mutex_unlock(&p_session->lock);
+    return ret;
+  }
+
+  p_session->fbd_count++;
+  if (NULL != p_session->dec_params.jpeg_cb) {
+    p_session->job_status = JPEG_JOB_STATUS_DONE;
+    output_buf.buf_filled_len = (uint32_t)pBuffer->nFilledLen;
+    output_buf.buf_vaddr = pBuffer->pBuffer;
+    output_buf.fd = -1;
+    CDBG("%s:%d] send jpeg callback %d", __func__, __LINE__,
+      p_session->job_status);
+    p_session->dec_params.jpeg_cb(p_session->job_status,
+      p_session->client_hdl,
+      p_session->jobId,
+      &output_buf,
+      p_session->dec_params.userdata);
+
+    /* remove from ready queue */
+    mm_jpegdec_job_done(p_session);
+  }
+  pthread_mutex_unlock(&p_session->lock);
+  CDBG("%s:%d] ", __func__, __LINE__);
+
+  return ret;
+}
+
+OMX_ERRORTYPE mm_jpegdec_event_handler(OMX_HANDLETYPE hComponent,
+  OMX_PTR pAppData,
+  OMX_EVENTTYPE eEvent,
+  OMX_U32 nData1,
+  OMX_U32 nData2,
+  OMX_PTR pEventData)
+{
+  mm_jpeg_job_session_t *p_session = (mm_jpeg_job_session_t *) pAppData;
+
+  CDBG("%s:%d] %d %d %d state %d", __func__, __LINE__, eEvent, (int)nData1,
+    (int)nData2, p_session->abort_state);
+
+  CDBG("%s:%d] AppData=%p ", __func__, __LINE__, pAppData);
+
+  pthread_mutex_lock(&p_session->lock);
+  p_session->omxEvent = eEvent;
+  if (MM_JPEG_ABORT_INIT == p_session->abort_state) {
+    p_session->abort_state = MM_JPEG_ABORT_DONE;
+    pthread_cond_signal(&p_session->cond);
+    pthread_mutex_unlock(&p_session->lock);
+    return OMX_ErrorNone;
+  }
+
+  if (eEvent == OMX_EventError) {
+    if (p_session->encoding == OMX_TRUE) {
+      CDBG("%s:%d] Error during encoding", __func__, __LINE__);
+
+      /* send jpeg callback */
+      if (NULL != p_session->dec_params.jpeg_cb) {
+        p_session->job_status = JPEG_JOB_STATUS_ERROR;
+        CDBG("%s:%d] send jpeg error callback %d", __func__, __LINE__,
+          p_session->job_status);
+        p_session->dec_params.jpeg_cb(p_session->job_status,
+          p_session->client_hdl,
+          p_session->jobId,
+          NULL,
+          p_session->dec_params.userdata);
+      }
+
+      /* remove from ready queue */
+      mm_jpegdec_job_done(p_session);
+    }
+    pthread_cond_signal(&p_session->cond);
+  } else if (eEvent == OMX_EventCmdComplete) {
+    p_session->state_change_pending = OMX_FALSE;
+    p_session->event_pending = OMX_FALSE;
+    pthread_cond_signal(&p_session->cond);
+  }  else if (eEvent == OMX_EventPortSettingsChanged) {
+    p_session->event_pending = OMX_FALSE;
+    pthread_cond_signal(&p_session->cond);
+  }
+
+  pthread_mutex_unlock(&p_session->lock);
+  CDBG("%s:%d]", __func__, __LINE__);
+  return OMX_ErrorNone;
+}
+
+/** mm_jpegdec_abort_job:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *    @jobId: job id
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       Abort the decode job
+ *
+ **/
+int32_t mm_jpegdec_abort_job(mm_jpeg_obj *my_obj,
+  uint32_t jobId)
+{
+  int32_t rc = -1;
+  mm_jpeg_job_q_node_t *node = NULL;
+  mm_jpeg_job_session_t *p_session = NULL;
+
+  CDBG("%s:%d] ", __func__, __LINE__);
+  pthread_mutex_lock(&my_obj->job_lock);
+
+  /* abort job if in todo queue */
+  node = mm_jpeg_queue_remove_job_by_job_id(&my_obj->job_mgr.job_queue, jobId);
+  if (NULL != node) {
+    free(node);
+    goto abort_done;
+  }
+
+  /* abort job if in ongoing queue */
+  node = mm_jpeg_queue_remove_job_by_job_id(&my_obj->ongoing_job_q, jobId);
+  if (NULL != node) {
+    /* find job that is OMX ongoing, ask OMX to abort the job */
+    p_session = mm_jpeg_get_session(my_obj, node->dec_info.job_id);
+    if (p_session) {
+      mm_jpeg_session_abort(p_session);
+    } else {
+      CDBG_ERROR("%s:%d] Invalid job id 0x%x", __func__, __LINE__,
+        node->dec_info.job_id);
+    }
+    free(node);
+    goto abort_done;
+  }
+
+abort_done:
+  pthread_mutex_unlock(&my_obj->job_lock);
+
+  return rc;
+}
+/** mm_jpegdec_init:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       Initializes the jpeg client
+ *
+ **/
+int32_t mm_jpegdec_init(mm_jpeg_obj *my_obj)
+{
+  int32_t rc = 0;
+
+  /* init locks */
+  pthread_mutex_init(&my_obj->job_lock, NULL);
+
+  /* init ongoing job queue */
+  rc = mm_jpeg_queue_init(&my_obj->ongoing_job_q);
+  if (0 != rc) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+    return -1;
+  }
+
+  /* init job semaphore and launch jobmgr thread */
+  CDBG("%s:%d] Launch jobmgr thread rc %d", __func__, __LINE__, rc);
+  rc = mm_jpeg_jobmgr_thread_launch(my_obj);
+  if (0 != rc) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+    return -1;
+  }
+
+  /* load OMX */
+  if (OMX_ErrorNone != OMX_Init()) {
+    /* roll back in error case */
+    CDBG_ERROR("%s:%d] OMX_Init failed (%d)", __func__, __LINE__, rc);
+    mm_jpeg_jobmgr_thread_release(my_obj);
+    mm_jpeg_queue_deinit(&my_obj->ongoing_job_q);
+    pthread_mutex_destroy(&my_obj->job_lock);
+    rc = -1;
+  }
+
+  return rc;
+}
+
+/** mm_jpegdec_deinit:
+ *
+ *  Arguments:
+ *    @my_obj: jpeg object
+ *
+ *  Return:
+ *       0 for success else failure
+ *
+ *  Description:
+ *       Deinits the jpeg client
+ *
+ **/
+int32_t mm_jpegdec_deinit(mm_jpeg_obj *my_obj)
+{
+  int32_t rc = 0;
+
+  /* release jobmgr thread */
+  rc = mm_jpeg_jobmgr_thread_release(my_obj);
+  if (0 != rc) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+  }
+
+  /* unload OMX engine */
+  OMX_Deinit();
+
+  /* deinit ongoing job and cb queue */
+  rc = mm_jpeg_queue_deinit(&my_obj->ongoing_job_q);
+  if (0 != rc) {
+    CDBG_ERROR("%s:%d] Error", __func__, __LINE__);
+  }
+
+  /* destroy locks */
+  pthread_mutex_destroy(&my_obj->job_lock);
+
+  return rc;
+}
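
Port enable/disable and the decode sequence above all rely on the same handshake with the OMX component: arm event_pending under the session lock, issue the asynchronous OMX call, then block on the condition variable until mm_jpegdec_event_handler() clears the flag and signals. The standalone sketch below restates that pattern with generic names (none of these identifiers come from the HAL source). It also wraps the wait in a loop, which the code above does not, so there a spurious wakeup would be treated as the awaited event.

    #include <pthread.h>
    #include <stdbool.h>

    /* Generic illustration of the wait-for-OMX-event pattern (not HAL code). */
    typedef struct {
      pthread_mutex_t lock;
      pthread_cond_t  cond;
      bool            event_pending;
      int             last_event;
    } omx_waiter_t;

    /* caller thread: arm the flag, kick off the async command, then wait */
    static int wait_for_omx_event(omx_waiter_t *w, int (*send_cmd)(void *), void *ctx)
    {
      int rc;

      pthread_mutex_lock(&w->lock);
      w->event_pending = true;
      pthread_mutex_unlock(&w->lock);

      rc = send_cmd(ctx);                /* e.g. wraps OMX_SendCommand() */
      if (rc != 0)
        return rc;

      pthread_mutex_lock(&w->lock);
      while (w->event_pending)           /* loop guards against spurious wakeups */
        pthread_cond_wait(&w->cond, &w->lock);
      rc = w->last_event;
      pthread_mutex_unlock(&w->lock);
      return rc;
    }

    /* OMX callback thread: record the event and release the waiter */
    static void on_omx_event(omx_waiter_t *w, int event)
    {
      pthread_mutex_lock(&w->lock);
      w->last_event = event;
      w->event_pending = false;
      pthread_cond_signal(&w->cond);
      pthread_mutex_unlock(&w->lock);
    }
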
diff --git a/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpegdec_interface.c b/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpegdec_interface.c
new file mode 100644
index 0000000..08c7d1d
--- /dev/null
+++ b/camera/QCamera2/stack/mm-jpeg-interface/src/mm_jpegdec_interface.c
@@ -0,0 +1,304 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <pthread.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include "mm_jpeg_dbg.h"
+#include "mm_jpeg_interface.h"
+#include "mm_jpeg.h"
+
+static pthread_mutex_t g_dec_intf_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static mm_jpeg_obj* g_jpegdec_obj = NULL;
+
+/** mm_jpegdec_intf_start_job:
+ *
+ *  Arguments:
+ *    @job: jpeg decode job object
+ *    @job_id: job id (output)
+ *
+ *  Return:
+ *       0 success, failure otherwise
+ *
+ *  Description:
+ *       start the jpeg job
+ *
+ **/
+static int32_t mm_jpegdec_intf_start_job(mm_jpeg_job_t* job, uint32_t* job_id)
+{
+  int32_t rc = -1;
+
+  if (NULL == job ||
+    NULL == job_id) {
+    CDBG_ERROR("%s:%d] invalid parameters for job or jobId", __func__, __LINE__);
+    return rc;
+  }
+
+  pthread_mutex_lock(&g_dec_intf_lock);
+  if (NULL == g_jpegdec_obj) {
+    /* mm_jpeg obj not exists, return error */
+    CDBG_ERROR("%s:%d] mm_jpeg is not opened yet", __func__, __LINE__);
+    pthread_mutex_unlock(&g_dec_intf_lock);
+    return rc;
+  }
+  rc = mm_jpegdec_start_decode_job(g_jpegdec_obj, job, job_id);
+  pthread_mutex_unlock(&g_dec_intf_lock);
+  return rc;
+}
+
+/** mm_jpegdec_intf_create_session:
+ *
+ *  Arguments:
+ *    @client_hdl: client handle
+ *    @p_params: decode parameters
+ *    @p_session_id: session id
+ *
+ *  Return:
+ *       0 success, failure otherwise
+ *
+ *  Description:
+ *       Create new jpeg session
+ *
+ **/
+static int32_t mm_jpegdec_intf_create_session(uint32_t client_hdl,
+    mm_jpeg_decode_params_t *p_params,
+    uint32_t *p_session_id)
+{
+  int32_t rc = -1;
+
+  if (0 == client_hdl || NULL == p_params || NULL == p_session_id) {
+    CDBG_ERROR("%s:%d] invalid client_hdl or jobId", __func__, __LINE__);
+    return rc;
+  }
+
+  pthread_mutex_lock(&g_dec_intf_lock);
+  if (NULL == g_jpegdec_obj) {
+    /* mm_jpeg obj not exists, return error */
+    CDBG_ERROR("%s:%d] mm_jpeg is not opened yet", __func__, __LINE__);
+    pthread_mutex_unlock(&g_dec_intf_lock);
+    return rc;
+  }
+
+  rc = mm_jpegdec_create_session(g_jpegdec_obj, client_hdl, p_params, p_session_id);
+  pthread_mutex_unlock(&g_dec_intf_lock);
+  return rc;
+}
+
+/** mm_jpegdec_intf_destroy_session:
+ *
+ *  Arguments:
+ *    @session_id: session id
+ *
+ *  Return:
+ *       0 success, failure otherwise
+ *
+ *  Description:
+ *       Destroy jpeg session
+ *
+ **/
+static int32_t mm_jpegdec_intf_destroy_session(uint32_t session_id)
+{
+  int32_t rc = -1;
+
+  if (0 == session_id) {
+    CDBG_ERROR("%s:%d] invalid client_hdl or jobId", __func__, __LINE__);
+    return rc;
+  }
+
+  pthread_mutex_lock(&g_dec_intf_lock);
+  if (NULL == g_jpegdec_obj) {
+    /* mm_jpeg obj not exists, return error */
+    CDBG_ERROR("%s:%d] mm_jpeg is not opened yet", __func__, __LINE__);
+    pthread_mutex_unlock(&g_dec_intf_lock);
+    return rc;
+  }
+
+  rc = mm_jpegdec_destroy_session_by_id(g_jpegdec_obj, session_id);
+  pthread_mutex_unlock(&g_dec_intf_lock);
+  return rc;
+}
+
+/** mm_jpegdec_intf_abort_job:
+ *
+ *  Arguments:
+ *    @jobId: job id
+ *
+ *  Return:
+ *       0 success, failure otherwise
+ *
+ *  Description:
+ *       Abort the jpeg job
+ *
+ **/
+static int32_t mm_jpegdec_intf_abort_job(uint32_t job_id)
+{
+  int32_t rc = -1;
+
+  if (0 == job_id) {
+    CDBG_ERROR("%s:%d] invalid jobId", __func__, __LINE__);
+    return rc;
+  }
+
+  pthread_mutex_lock(&g_dec_intf_lock);
+  if (NULL == g_jpegdec_obj) {
+    /* mm_jpeg obj not exists, return error */
+    CDBG_ERROR("%s:%d] mm_jpeg is not opened yet", __func__, __LINE__);
+    pthread_mutex_unlock(&g_dec_intf_lock);
+    return rc;
+  }
+
+  rc = mm_jpegdec_abort_job(g_jpegdec_obj, job_id);
+  pthread_mutex_unlock(&g_dec_intf_lock);
+  return rc;
+}
+
+/** mm_jpegdec_intf_close:
+ *
+ *  Arguments:
+ *    @client_hdl: client handle
+ *
+ *  Return:
+ *       0 success, failure otherwise
+ *
+ *  Description:
+ *       Close a jpeg decoder client
+ *
+ **/
+static int32_t mm_jpegdec_intf_close(uint32_t client_hdl)
+{
+  int32_t rc = -1;
+
+  if (0 == client_hdl) {
+    CDBG_ERROR("%s:%d] invalid client_hdl", __func__, __LINE__);
+    return rc;
+  }
+
+  pthread_mutex_lock(&g_dec_intf_lock);
+  if (NULL == g_jpegdec_obj) {
+    /* mm_jpeg obj not exists, return error */
+    CDBG_ERROR("%s:%d] mm_jpeg is not opened yet", __func__, __LINE__);
+    pthread_mutex_unlock(&g_dec_intf_lock);
+    return rc;
+  }
+
+  rc = mm_jpeg_close(g_jpegdec_obj, client_hdl);
+  g_jpegdec_obj->num_clients--;
+  if(0 == rc) {
+    if (0 == g_jpegdec_obj->num_clients) {
+      /* No client, close jpeg internally */
+      rc = mm_jpegdec_deinit(g_jpegdec_obj);
+      free(g_jpegdec_obj);
+      g_jpegdec_obj = NULL;
+    }
+  }
+
+  pthread_mutex_unlock(&g_dec_intf_lock);
+  return rc;
+}
+
+
+
+/** jpegdec_open:
+ *
+ *  Arguments:
+ *    @ops: ops table pointer
+ *
+ *  Return:
+ *       0 failure, success otherwise
+ *
+ *  Description:
+ *       Open a jpeg client
+ *
+ **/
+uint32_t jpegdec_open(mm_jpegdec_ops_t *ops)
+{
+  int32_t rc = 0;
+  uint32_t clnt_hdl = 0;
+  mm_jpeg_obj* jpeg_obj = NULL;
+
+  pthread_mutex_lock(&g_dec_intf_lock);
+  /* first time open */
+  if(NULL == g_jpegdec_obj) {
+    jpeg_obj = (mm_jpeg_obj *)malloc(sizeof(mm_jpeg_obj));
+    if(NULL == jpeg_obj) {
+      CDBG_ERROR("%s:%d] no mem", __func__, __LINE__);
+      pthread_mutex_unlock(&g_dec_intf_lock);
+      return clnt_hdl;
+    }
+
+    /* initialize jpeg obj */
+    memset(jpeg_obj, 0, sizeof(mm_jpeg_obj));
+    rc = mm_jpegdec_init(jpeg_obj);
+    if(0 != rc) {
+      CDBG_ERROR("%s:%d] mm_jpeg_init err = %d", __func__, __LINE__, rc);
+      free(jpeg_obj);
+      pthread_mutex_unlock(&g_dec_intf_lock);
+      return clnt_hdl;
+    }
+
+    /* remember in global variable */
+    g_jpegdec_obj = jpeg_obj;
+  }
+
+  /* open new client */
+  clnt_hdl = mm_jpeg_new_client(g_jpegdec_obj);
+  if (clnt_hdl > 0) {
+    /* valid client */
+    if (NULL != ops) {
+      /* fill in ops tbl if ptr not NULL */
+      ops->start_job = mm_jpegdec_intf_start_job;
+      ops->abort_job = mm_jpegdec_intf_abort_job;
+      ops->create_session = mm_jpegdec_intf_create_session;
+      ops->destroy_session = mm_jpegdec_intf_destroy_session;
+      ops->close = mm_jpegdec_intf_close;
+    }
+  } else {
+    /* failed new client */
+    CDBG_ERROR("%s:%d] mm_jpeg_new_client failed", __func__, __LINE__);
+
+    if (0 == g_jpegdec_obj->num_clients) {
+      /* no client, close jpeg */
+      mm_jpegdec_deinit(g_jpegdec_obj);
+      free(g_jpegdec_obj);
+      g_jpegdec_obj = NULL;
+    }
+  }
+
+  pthread_mutex_unlock(&g_dec_intf_lock);
+  return clnt_hdl;
+}
+
+
+
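Note: the decoder interface above mirrors the encoder one: a client obtains an ops table from jpegdec_open() and drives everything through it. A minimal, hypothetical usage sketch (error handling and buffer/params setup omitted; the decode params would be filled in the way mm_jpegdec_test.c does further below):

    /* assumes #include "mm_jpeg_interface.h" plus <string.h> for memset */
    mm_jpegdec_ops_t ops;
    mm_jpeg_decode_params_t params;            /* filled in by the caller */
    mm_jpeg_job_t job;
    uint32_t session_id = 0, job_id = 0;

    uint32_t handle = jpegdec_open(&ops);      /* returns 0 on failure */
    if (handle > 0) {
        ops.create_session(handle, &params, &session_id);
        memset(&job, 0, sizeof(job));
        job.job_type = JPEG_JOB_TYPE_DECODE;
        job.decode_job.session_id = session_id;
        ops.start_job(&job, &job_id);          /* completion arrives via params.jpeg_cb */
        /* ... wait for the decode callback ... */
        ops.destroy_session(session_id);
        ops.close(handle);
    }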
diff --git a/camera/QCamera2/stack/mm-jpeg-interface/test/Android.mk b/camera/QCamera2/stack/mm-jpeg-interface/test/Android.mk
new file mode 100644
index 0000000..bc16678
--- /dev/null
+++ b/camera/QCamera2/stack/mm-jpeg-interface/test/Android.mk
@@ -0,0 +1,79 @@
+#encoder interface test
+OLD_LOCAL_PATH := $(LOCAL_PATH)
+MM_JPEG_TEST_PATH := $(call my-dir)
+
+include $(LOCAL_PATH)/../../common.mk
+include $(CLEAR_VARS)
+LOCAL_PATH := $(MM_JPEG_TEST_PATH)
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_CFLAGS := -DCAMERA_ION_HEAP_ID=ION_IOMMU_HEAP_ID
+LOCAL_CFLAGS += -Wall -Wextra -Werror -Wno-unused-parameter
+LOCAL_CFLAGS += -D_ANDROID_
+
+ifeq ($(strip $(TARGET_USES_ION)),true)
+LOCAL_CFLAGS += -DUSE_ION
+endif
+
+OMX_HEADER_DIR := frameworks/native/include/media/openmax
+OMX_CORE_DIR := hardware/qcom/camera/mm-image-codec
+
+LOCAL_C_INCLUDES := $(MM_JPEG_TEST_PATH)
+LOCAL_C_INCLUDES += $(MM_JPEG_TEST_PATH)/../inc
+LOCAL_C_INCLUDES += $(MM_JPEG_TEST_PATH)/../../common
+LOCAL_C_INCLUDES += $(OMX_HEADER_DIR)
+LOCAL_C_INCLUDES += $(OMX_CORE_DIR)/qexif
+LOCAL_C_INCLUDES += $(OMX_CORE_DIR)/qomx_core
+
+LOCAL_C_INCLUDES+= $(kernel_includes)
+LOCAL_ADDITIONAL_DEPENDENCIES := $(common_deps)
+
+LOCAL_SRC_FILES := mm_jpeg_test.c
+
+LOCAL_32_BIT_ONLY := $(BOARD_QTI_CAMERA_32BIT_ONLY)
+LOCAL_MODULE           := mm-jpeg-interface-test
+LOCAL_PRELINK_MODULE   := false
+LOCAL_SHARED_LIBRARIES := libcutils libdl libmmjpeg_interface
+
+include $(BUILD_EXECUTABLE)
+
+
+
+#decoder interface test
+
+include $(CLEAR_VARS)
+LOCAL_PATH := $(MM_JPEG_TEST_PATH)
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_CFLAGS := -DCAMERA_ION_HEAP_ID=ION_IOMMU_HEAP_ID
+LOCAL_CFLAGS += -Wall -Wextra -Werror -Wno-unused-parameter
+
+LOCAL_CFLAGS += -D_ANDROID_
+
+ifeq ($(strip $(TARGET_USES_ION)),true)
+LOCAL_CFLAGS += -DUSE_ION
+endif
+
+OMX_HEADER_DIR := frameworks/native/include/media/openmax
+OMX_CORE_DIR := hardware/qcom/camera/mm-image-codec
+
+LOCAL_C_INCLUDES := $(MM_JPEG_TEST_PATH)
+LOCAL_C_INCLUDES += $(MM_JPEG_TEST_PATH)/../inc
+LOCAL_C_INCLUDES += $(MM_JPEG_TEST_PATH)/../../common
+LOCAL_C_INCLUDES += $(OMX_HEADER_DIR)
+LOCAL_C_INCLUDES += $(OMX_CORE_DIR)/qexif
+LOCAL_C_INCLUDES += $(OMX_CORE_DIR)/qomx_core
+
+LOCAL_C_INCLUDES+= $(kernel_includes)
+LOCAL_ADDITIONAL_DEPENDENCIES := $(common_deps)
+
+LOCAL_SRC_FILES := mm_jpegdec_test.c
+
+LOCAL_32_BIT_ONLY := $(BOARD_QTI_CAMERA_32BIT_ONLY)
+LOCAL_MODULE           := mm-jpegdec-interface-test
+LOCAL_PRELINK_MODULE   := false
+LOCAL_SHARED_LIBRARIES := libcutils libdl libmmjpeg_interface
+
+include $(BUILD_EXECUTABLE)
+
+LOCAL_PATH := $(OLD_LOCAL_PATH)
diff --git a/camera/QCamera2/stack/mm-jpeg-interface/test/mm_jpeg_test.c b/camera/QCamera2/stack/mm-jpeg-interface/test/mm_jpeg_test.c
new file mode 100644
index 0000000..72ce3a6
--- /dev/null
+++ b/camera/QCamera2/stack/mm-jpeg-interface/test/mm_jpeg_test.c
@@ -0,0 +1,659 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "mm_jpeg_interface.h"
+#include "mm_jpeg_ionbuf.h"
+#include <sys/time.h>
+#include <stdlib.h>
+
+#define MAX_NUM_BUFS (12)
+
+/** DUMP_TO_FILE:
+ *  @filename: file name
+ *  @p_addr: address of the buffer
+ *  @len: buffer length
+ *
+ *  dump the image to the file
+ **/
+#define DUMP_TO_FILE(filename, p_addr, len) ({ \
+  FILE *fp = fopen(filename, "w+"); \
+  if (fp) { \
+    fwrite(p_addr, 1, len, fp); \
+    fclose(fp); \
+  } else { \
+    CDBG_ERROR("%s:%d] cannot dump image", __func__, __LINE__); \
+  } \
+})
+
+static uint32_t g_count = 1U, g_i;
+
+typedef struct {
+  mm_jpeg_color_format fmt;
+  cam_rational_type_t mult;
+  const char *str;
+} mm_jpeg_intf_test_colfmt_t;
+
+typedef struct {
+  char *filename;
+  int width;
+  int height;
+  char *out_filename;
+  uint32_t burst_mode;
+  uint32_t min_out_bufs;
+  mm_jpeg_intf_test_colfmt_t col_fmt;
+  uint32_t encode_thumbnail;
+  int tmb_width;
+  int tmb_height;
+  int main_quality;
+  int thumb_quality;
+} jpeg_test_input_t;
+
+/* Static constants */
+/*  default Luma Qtable */
+const uint8_t DEFAULT_QTABLE_0[QUANT_SIZE] = {
+  16, 11, 10, 16, 24, 40, 51, 61,
+  12, 12, 14, 19, 26, 58, 60, 55,
+  14, 13, 16, 24, 40, 57, 69, 56,
+  14, 17, 22, 29, 51, 87, 80, 62,
+  18, 22, 37, 56, 68, 109, 103, 77,
+  24, 35, 55, 64, 81, 104, 113, 92,
+  49, 64, 78, 87, 103, 121, 120, 101,
+  72, 92, 95, 98, 112, 100, 103, 99
+};
+
+/*  default Chroma Qtable */
+const uint8_t DEFAULT_QTABLE_1[QUANT_SIZE] = {
+  17, 18, 24, 47, 99, 99, 99, 99,
+  18, 21, 26, 66, 99, 99, 99, 99,
+  24, 26, 56, 99, 99, 99, 99, 99,
+  47, 66, 99, 99, 99, 99, 99, 99,
+  99, 99, 99, 99, 99, 99, 99, 99,
+  99, 99, 99, 99, 99, 99, 99, 99,
+  99, 99, 99, 99, 99, 99, 99, 99,
+  99, 99, 99, 99, 99, 99, 99, 99
+};
+
+typedef struct {
+  char *filename[MAX_NUM_BUFS];
+  int width;
+  int height;
+  char *out_filename[MAX_NUM_BUFS];
+  pthread_mutex_t lock;
+  pthread_cond_t cond;
+  buffer_t input[MAX_NUM_BUFS];
+  buffer_t output[MAX_NUM_BUFS];
+  int use_ion;
+  uint32_t handle;
+  mm_jpeg_ops_t ops;
+  uint32_t job_id[MAX_NUM_BUFS];
+  mm_jpeg_encode_params_t params;
+  mm_jpeg_job_t job;
+  uint32_t session_id;
+  uint32_t num_bufs;
+  uint32_t min_out_bufs;
+  size_t buf_filled_len[MAX_NUM_BUFS];
+} mm_jpeg_intf_test_t;
+
+
+
+static const mm_jpeg_intf_test_colfmt_t color_formats[] =
+{
+  { MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V2, {3, 2}, "YCRCBLP_H2V2" },
+  { MM_JPEG_COLOR_FORMAT_YCBCRLP_H2V2, {3, 2}, "YCBCRLP_H2V2" },
+  { MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V1, {2, 1}, "YCRCBLP_H2V1" },
+  { MM_JPEG_COLOR_FORMAT_YCBCRLP_H2V1, {2, 1}, "YCBCRLP_H2V1" },
+  { MM_JPEG_COLOR_FORMAT_YCRCBLP_H1V2, {2, 1}, "YCRCBLP_H1V2" },
+  { MM_JPEG_COLOR_FORMAT_YCBCRLP_H1V2, {2, 1}, "YCBCRLP_H1V2" },
+  { MM_JPEG_COLOR_FORMAT_YCRCBLP_H1V1, {3, 1}, "YCRCBLP_H1V1" },
+  { MM_JPEG_COLOR_FORMAT_YCBCRLP_H1V1, {3, 1}, "YCBCRLP_H1V1" }
+};
+
+static jpeg_test_input_t jpeg_input[] = {
+  { QCAMERA_DUMP_FRM_LOCATION"test_1.yuv", 4000, 3008, QCAMERA_DUMP_FRM_LOCATION"test_1.jpg", 0, 0,
+  { MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V2, {3, 2}, "YCRCBLP_H2V2" }, 0, 320, 240, 80, 80}
+};
+
+static void mm_jpeg_encode_callback(jpeg_job_status_t status,
+  uint32_t client_hdl,
+  uint32_t jobId,
+  mm_jpeg_output_t *p_output,
+  void *userData)
+{
+  mm_jpeg_intf_test_t *p_obj = (mm_jpeg_intf_test_t *)userData;
+
+  pthread_mutex_lock(&p_obj->lock);
+
+  if (status == JPEG_JOB_STATUS_ERROR) {
+    CDBG_ERROR("%s:%d] Encode error", __func__, __LINE__);
+  } else {
+    int i = 0;
+    for (i = 0; p_obj->job_id[i] && (jobId != p_obj->job_id[i]); i++)
+      ;
+    if (!p_obj->job_id[i]) {
+      CDBG_ERROR("%s:%d] Cannot find job ID!!!", __func__, __LINE__);
+      goto error;
+    }
+    CDBG_ERROR("%s:%d] Encode success addr %p len %zu idx %d",
+      __func__, __LINE__, p_output->buf_vaddr, p_output->buf_filled_len, i);
+
+    p_obj->buf_filled_len[i] = p_output->buf_filled_len;
+    if (p_obj->min_out_bufs) {
+      CDBG_ERROR("%s:%d] Saving file%s addr %p len %zu",
+          __func__, __LINE__, p_obj->out_filename[i],
+          p_output->buf_vaddr, p_output->buf_filled_len);
+
+      DUMP_TO_FILE(p_obj->out_filename[i], p_output->buf_vaddr,
+        p_output->buf_filled_len);
+    }
+  }
+  g_i++;
+
+error:
+
+  if (g_i >= g_count) {
+    CDBG_ERROR("%s:%d] Signal the thread", __func__, __LINE__);
+    pthread_cond_signal(&p_obj->cond);
+  }
+  pthread_mutex_unlock(&p_obj->lock);
+}
+
+int mm_jpeg_test_alloc(buffer_t *p_buffer, int use_pmem)
+{
+  int ret = 0;
+  /*Allocate buffers*/
+  if (use_pmem) {
+    p_buffer->addr = (uint8_t *)buffer_allocate(p_buffer, 0);
+    if (NULL == p_buffer->addr) {
+      CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+      return -1;
+    }
+  } else {
+    /* Allocate heap memory */
+    p_buffer->addr = (uint8_t *)malloc(p_buffer->size);
+    if (NULL == p_buffer->addr) {
+      CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+      return -1;
+    }
+  }
+  return ret;
+}
+
+void mm_jpeg_test_free(buffer_t *p_buffer)
+{
+  if (p_buffer->addr == NULL)
+    return;
+
+  if (p_buffer->p_pmem_fd >= 0)
+    buffer_deallocate(p_buffer);
+  else
+    free(p_buffer->addr);
+
+  memset(p_buffer, 0x0, sizeof(buffer_t));
+}
+
+int mm_jpeg_test_read(mm_jpeg_intf_test_t *p_obj, uint32_t idx)
+{
+  FILE *fp = NULL;
+  size_t file_size = 0;
+  fp = fopen(p_obj->filename[idx], "rb");
+  if (!fp) {
+    CDBG_ERROR("%s:%d] error", __func__, __LINE__);
+    return -1;
+  }
+  fseek(fp, 0, SEEK_END);
+  file_size = (size_t)ftell(fp);
+  fseek(fp, 0, SEEK_SET);
+  CDBG_ERROR("%s:%d] input file size is %zu buf_size %zu",
+    __func__, __LINE__, file_size, p_obj->input[idx].size);
+
+  if (p_obj->input[idx].size > file_size) {
+    CDBG_ERROR("%s:%d] error", __func__, __LINE__);
+    fclose(fp);
+    return -1;
+  }
+  fread(p_obj->input[idx].addr, 1, p_obj->input[idx].size, fp);
+  fclose(fp);
+  return 0;
+}
+
+static int encode_init(jpeg_test_input_t *p_input, mm_jpeg_intf_test_t *p_obj)
+{
+  int rc = -1;
+  size_t size = (size_t)(p_input->width * p_input->height);
+  mm_jpeg_encode_params_t *p_params = &p_obj->params;
+  mm_jpeg_encode_job_t *p_job_params = &p_obj->job.encode_job;
+  uint32_t i = 0;
+  uint32_t burst_mode = p_input->burst_mode;
+  jpeg_test_input_t *p_in = p_input;
+
+  do {
+    p_obj->filename[i] = p_in->filename;
+    p_obj->width = p_input->width;
+    p_obj->height = p_input->height;
+    p_obj->out_filename[i] = p_in->out_filename;
+    p_obj->use_ion = 1;
+    p_obj->min_out_bufs = p_input->min_out_bufs;
+
+    /* allocate buffers */
+    p_obj->input[i].size = size * (size_t)p_input->col_fmt.mult.numerator /
+        (size_t)p_input->col_fmt.mult.denominator;
+    rc = mm_jpeg_test_alloc(&p_obj->input[i], p_obj->use_ion);
+    if (rc) {
+      CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+      return -1;
+    }
+
+
+    rc = mm_jpeg_test_read(p_obj, i);
+    if (rc) {
+      CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+      return -1;
+    }
+
+    /* src buffer config*/
+    p_params->src_main_buf[i].buf_size = p_obj->input[i].size;
+    p_params->src_main_buf[i].buf_vaddr = p_obj->input[i].addr;
+    p_params->src_main_buf[i].fd = p_obj->input[i].p_pmem_fd;
+    p_params->src_main_buf[i].index = i;
+    p_params->src_main_buf[i].format = MM_JPEG_FMT_YUV;
+    p_params->src_main_buf[i].offset.mp[0].len = (uint32_t)size;
+    p_params->src_main_buf[i].offset.mp[0].stride = p_input->width;
+    p_params->src_main_buf[i].offset.mp[0].scanline = p_input->height;
+    p_params->src_main_buf[i].offset.mp[1].len = (uint32_t)(size >> 1);
+
+    /* src buffer config*/
+    p_params->src_thumb_buf[i].buf_size = p_obj->input[i].size;
+    p_params->src_thumb_buf[i].buf_vaddr = p_obj->input[i].addr;
+    p_params->src_thumb_buf[i].fd = p_obj->input[i].p_pmem_fd;
+    p_params->src_thumb_buf[i].index = i;
+    p_params->src_thumb_buf[i].format = MM_JPEG_FMT_YUV;
+    p_params->src_thumb_buf[i].offset.mp[0].len = (uint32_t)size;
+    p_params->src_thumb_buf[i].offset.mp[0].stride = p_input->width;
+    p_params->src_thumb_buf[i].offset.mp[0].scanline = p_input->height;
+    p_params->src_thumb_buf[i].offset.mp[1].len = (uint32_t)(size >> 1);
+
+
+    i++;
+  } while((++p_in)->filename);
+
+  p_obj->num_bufs = i;
+
+  pthread_mutex_init(&p_obj->lock, NULL);
+  pthread_cond_init(&p_obj->cond, NULL);
+
+
+  /* set encode parameters */
+  p_params->jpeg_cb = mm_jpeg_encode_callback;
+  p_params->userdata = p_obj;
+  p_params->color_format = p_input->col_fmt.fmt;
+  p_params->thumb_color_format = p_input->col_fmt.fmt;
+
+  if (p_obj->min_out_bufs) {
+    p_params->num_dst_bufs = 2;
+  } else {
+    p_params->num_dst_bufs = p_obj->num_bufs;
+  }
+
+  for (i = 0; i < (uint32_t)p_params->num_dst_bufs; i++) {
+    p_obj->output[i].size = size * 3/2;
+    rc = mm_jpeg_test_alloc(&p_obj->output[i], 0);
+    if (rc) {
+      CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+      return -1;
+    }
+    /* dest buffer config */
+    p_params->dest_buf[i].buf_size = p_obj->output[i].size;
+    p_params->dest_buf[i].buf_vaddr = p_obj->output[i].addr;
+    p_params->dest_buf[i].fd = p_obj->output[i].p_pmem_fd;
+    p_params->dest_buf[i].index = i;
+  }
+
+
+  p_params->num_src_bufs = p_obj->num_bufs;
+  p_params->num_tmb_bufs = 0;
+  g_count = p_params->num_src_bufs;
+
+  p_params->encode_thumbnail = p_input->encode_thumbnail;
+  if (p_params->encode_thumbnail) {
+      p_params->num_tmb_bufs = p_obj->num_bufs;
+  }
+  p_params->quality = (uint32_t)p_input->main_quality;
+  p_params->thumb_quality = (uint32_t)p_input->thumb_quality;
+
+  p_job_params->dst_index = 0;
+  p_job_params->src_index = 0;
+  p_job_params->rotation = 0;
+
+  /* main dimension */
+  p_job_params->main_dim.src_dim.width = p_obj->width;
+  p_job_params->main_dim.src_dim.height = p_obj->height;
+  p_job_params->main_dim.dst_dim.width = p_obj->width;
+  p_job_params->main_dim.dst_dim.height = p_obj->height;
+  p_job_params->main_dim.crop.top = 0;
+  p_job_params->main_dim.crop.left = 0;
+  p_job_params->main_dim.crop.width = p_obj->width;
+  p_job_params->main_dim.crop.height = p_obj->height;
+
+  p_params->main_dim  = p_job_params->main_dim;
+
+  /* thumb dimension */
+  p_job_params->thumb_dim.src_dim.width = p_obj->width;
+  p_job_params->thumb_dim.src_dim.height = p_obj->height;
+  p_job_params->thumb_dim.dst_dim.width = p_input->tmb_width;
+  p_job_params->thumb_dim.dst_dim.height = p_input->tmb_height;
+  p_job_params->thumb_dim.crop.top = 0;
+  p_job_params->thumb_dim.crop.left = 0;
+  p_job_params->thumb_dim.crop.width = 0;
+  p_job_params->thumb_dim.crop.height = 0;
+
+  p_params->thumb_dim  = p_job_params->thumb_dim;
+
+  p_job_params->exif_info.numOfEntries = 0;
+  p_params->burst_mode = burst_mode;
+
+  /* Qtable */
+  p_job_params->qtable[0].eQuantizationTable =
+    OMX_IMAGE_QuantizationTableLuma;
+  p_job_params->qtable[1].eQuantizationTable =
+    OMX_IMAGE_QuantizationTableChroma;
+  p_job_params->qtable_set[0] = 1;
+  p_job_params->qtable_set[1] = 1;
+
+  for (i = 0; i < QUANT_SIZE; i++) {
+    p_job_params->qtable[0].nQuantizationMatrix[i] = DEFAULT_QTABLE_0[i];
+    p_job_params->qtable[1].nQuantizationMatrix[i] = DEFAULT_QTABLE_1[i];
+  }
+
+  return 0;
+}
+
+static int encode_test(jpeg_test_input_t *p_input)
+{
+  int rc = 0;
+  mm_jpeg_intf_test_t jpeg_obj;
+  uint32_t i = 0;
+
+  memset(&jpeg_obj, 0x0, sizeof(jpeg_obj));
+  rc = encode_init(p_input, &jpeg_obj);
+  if (rc) {
+    CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+    return -1;
+  }
+
+  mm_dimension pic_size;
+  memset(&pic_size, 0, sizeof(mm_dimension));
+  pic_size.w = (uint32_t)p_input->width;
+  pic_size.h = (uint32_t)p_input->height;
+
+  jpeg_obj.handle = jpeg_open(&jpeg_obj.ops, pic_size);
+  if (jpeg_obj.handle == 0) {
+    CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+    goto end;
+  }
+
+  rc = jpeg_obj.ops.create_session(jpeg_obj.handle, &jpeg_obj.params,
+    &jpeg_obj.job.encode_job.session_id);
+  if (jpeg_obj.job.encode_job.session_id == 0) {
+    CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+    goto end;
+  }
+
+  for (i = 0; i < jpeg_obj.num_bufs; i++) {
+    jpeg_obj.job.job_type = JPEG_JOB_TYPE_ENCODE;
+    jpeg_obj.job.encode_job.src_index = (int32_t) i;
+    jpeg_obj.job.encode_job.dst_index = (int32_t) i;
+    jpeg_obj.job.encode_job.thumb_index = (uint32_t) i;
+
+    if (jpeg_obj.params.burst_mode && jpeg_obj.min_out_bufs) {
+      jpeg_obj.job.encode_job.dst_index = -1;
+    }
+
+    rc = jpeg_obj.ops.start_job(&jpeg_obj.job, &jpeg_obj.job_id[i]);
+
+    if (rc) {
+      CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+      goto end;
+    }
+  }
+  jpeg_obj.job_id[i] = 0;
+
+  /*
+  usleep(5);
+  jpeg_obj.ops.abort_job(jpeg_obj.job_id[0]);
+  */
+  pthread_mutex_lock(&jpeg_obj.lock);
+  pthread_cond_wait(&jpeg_obj.cond, &jpeg_obj.lock);
+  pthread_mutex_unlock(&jpeg_obj.lock);
+
+
+  jpeg_obj.ops.destroy_session(jpeg_obj.job.encode_job.session_id);
+  jpeg_obj.ops.close(jpeg_obj.handle);
+
+end:
+  for (i = 0; i < jpeg_obj.num_bufs; i++) {
+    if (!jpeg_obj.min_out_bufs) {
+      // Save output files
+      CDBG_ERROR("%s:%d] Saving file%s addr %p len %zu",
+          __func__, __LINE__,jpeg_obj.out_filename[i],
+          jpeg_obj.output[i].addr, jpeg_obj.buf_filled_len[i]);
+
+      DUMP_TO_FILE(jpeg_obj.out_filename[i], jpeg_obj.output[i].addr,
+        jpeg_obj.buf_filled_len[i]);
+    }
+    mm_jpeg_test_free(&jpeg_obj.input[i]);
+    mm_jpeg_test_free(&jpeg_obj.output[i]);
+  }
+  return 0;
+}
+
+#define MAX_FILE_CNT (20)
+static int mm_jpeg_test_get_input(int argc, char *argv[],
+    jpeg_test_input_t *p_test)
+{
+  int c;
+  size_t in_file_cnt = 0, out_file_cnt = 0, i;
+  int idx = 0;
+  jpeg_test_input_t *p_test_base = p_test;
+
+  char *in_files[MAX_FILE_CNT];
+  char *out_files[MAX_FILE_CNT];
+
+  while ((c = getopt(argc, argv, "-I:O:W:H:F:BTMx:y:Q:q:")) != -1) {
+    switch (c) {
+    case 'B':
+      fprintf(stderr, "%-25s\n", "Using burst mode");
+      p_test->burst_mode = 1;
+      break;
+    case 'I':
+      for (idx = optind - 1; idx < argc; idx++) {
+        if (argv[idx][0] == '-') {
+          break;
+        }
+        in_files[in_file_cnt++] = argv[idx];
+      }
+      optind = idx - 1;
+
+      break;
+    case 'O':
+      for (idx = optind - 1; idx < argc; idx++) {
+        if (argv[idx][0] == '-') {
+          break;
+        }
+        out_files[out_file_cnt++] = argv[idx];
+      }
+      optind = idx - 1;
+
+      break;
+    case 'W':
+      p_test->width = atoi(optarg);
+      fprintf(stderr, "%-25s%d\n", "Width: ", p_test->width);
+      break;
+    case 'H':
+      p_test->height = atoi(optarg);
+      fprintf(stderr, "%-25s%d\n", "Height: ", p_test->height);
+      break;
+    case 'F':
+      p_test->col_fmt = color_formats[atoi(optarg)];
+      fprintf(stderr, "%-25s%s\n", "Format: ", p_test->col_fmt.str);
+      break;
+    case 'M':
+      p_test->min_out_bufs = 1;
+      fprintf(stderr, "%-25s\n", "Using minimum number of output buffers");
+      break;
+    case 'T':
+      p_test->encode_thumbnail = 1;
+      fprintf(stderr, "%-25s\n", "Encode thumbnail");
+      break;
+    case 'x':
+      p_test->tmb_width = atoi(optarg);
+      fprintf(stderr, "%-25s%d\n", "Tmb Width: ", p_test->tmb_width);
+      break;
+    case 'y':
+      p_test->tmb_height = atoi(optarg);
+      fprintf(stderr, "%-25s%d\n", "Tmb Height: ", p_test->tmb_height);
+      break;
+    case 'Q':
+      p_test->main_quality = atoi(optarg);
+      fprintf(stderr, "%-25s%d\n", "Main quality: ", p_test->main_quality);
+      break;
+    case 'q':
+      p_test->thumb_quality = atoi(optarg);
+      fprintf(stderr, "%-25s%d\n", "Thumb quality: ", p_test->thumb_quality);
+      break;
+    default:;
+    }
+  }
+  fprintf(stderr, "Infiles: %zu Outfiles: %zu\n", in_file_cnt, out_file_cnt);
+
+  if (in_file_cnt > out_file_cnt) {
+    fprintf(stderr, "%-25s\n", "Insufficient number of output files!");
+    return 1;
+  }
+
+  // Discard the extra out files
+  out_file_cnt = in_file_cnt;
+
+  p_test = realloc(p_test, (in_file_cnt + 1) * sizeof(*p_test));
+  if (!p_test) {
+    CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+    return 1;
+  }
+  memset(p_test+1, 0, (in_file_cnt) * sizeof(*p_test));
+
+  for (i = 0; i < in_file_cnt; i++, p_test++) {
+    memcpy(p_test, p_test_base, sizeof(*p_test));
+    p_test->filename = in_files[i];
+    p_test->out_filename = out_files[i];
+    fprintf(stderr, "Inf: %s Outf: %s\n", in_files[i], out_files[i]);
+  }
+
+
+  return 0;
+}
+
+static void mm_jpeg_test_print_usage()
+{
+  fprintf(stderr, "Usage: program_name [options]\n");
+  fprintf(stderr, "Mandatory options:\n");
+  fprintf(stderr, "  -I FILE1 [FILE2] [FILEN]\tList of input files\n");
+  fprintf(stderr, "  -O FILE1 [FILE2] [FILEN]\tList of output files\n");
+  fprintf(stderr, "  -W WIDTH\t\tOutput image width\n");
+  fprintf(stderr, "  -H HEIGHT\t\tOutput image height\n");
+  fprintf(stderr, "  -F \t\tColor format: \n");
+  fprintf(stderr, "\t\t\t\t%s (0), %s (1), %s (2) %s (3)\n"
+      "\t\t\t\t%s (4), %s (5), %s (6) %s (7)\n ",
+      color_formats[0].str, color_formats[1].str,
+      color_formats[2].str, color_formats[3].str,
+      color_formats[4].str, color_formats[5].str,
+      color_formats[6].str, color_formats[7].str);
+  fprintf(stderr, "Optional:\n");
+  fprintf(stderr, "  -T \t\Encode thumbnail\n");
+  fprintf(stderr, "  -x TMB_WIDTH\t\tThumbnail width\n");
+  fprintf(stderr, "  -y TMB_HEIGHT\t\tThumbnail height\n");
+  fprintf(stderr, "  -Q MAIN_QUALITY\t\tMain image quality\n");
+  fprintf(stderr, "  -q TMB_QUALITY\t\tThumbnail image quality\n");
+  fprintf(stderr, "  -B \t\tBurst mode. Utilize both encoder engines on"
+          "supported targets\n");
+  fprintf(stderr, "  -M \t\tUse minimum number of output buffers \n");
+  fprintf(stderr, "\n");
+}
+
+/** main:
+ *
+ *  Arguments:
+ *    @argc
+ *    @argv
+ *
+ *  Return:
+ *       0 or -ve values
+ *
+ *  Description:
+ *       main function
+ *
+ **/
+int main(int argc, char* argv[])
+{
+  jpeg_test_input_t *p_test_input;
+  int ret = 0;
+  if (argc > 1) {
+    p_test_input = calloc(2, sizeof(*p_test_input));
+    if (!p_test_input) {
+      CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+      goto exit;
+    }
+    memcpy(p_test_input, &jpeg_input[0], sizeof(*p_test_input));
+    ret = mm_jpeg_test_get_input(argc, argv, p_test_input);
+    if (ret) {
+      CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+      goto exit;
+    }
+  } else {
+    mm_jpeg_test_print_usage();
+    return 1;
+  }
+  ret = encode_test(p_test_input);
+
+exit:
+  if (!ret) {
+    fprintf(stderr, "%-25s\n", "Success!");
+  } else {
+    fprintf(stderr, "%-25s\n", "Fail!");
+  }
+
+  if (argc > 1) {
+    if (p_test_input) {
+      free(p_test_input);
+      p_test_input = NULL;
+    }
+  }
+
+  return ret;
+}
+
+
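Note: with the single default jpeg_input entry above (4000x3008, YCRCBLP_H2V2, multiplier 3/2), encode_init() sizes each source buffer as width * height * 3 / 2 = 4000 * 3008 * 3 / 2 = 18,048,000 bytes, and each destination buffer the same (size * 3/2), so one input/output pair takes roughly 36 MB of ION/heap memory. The mult field in color_formats[] encodes the chroma subsampling overhead: 3/2 for the H2V2 formats, 2/1 for H2V1 and H1V2, and 3/1 for H1V1.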
diff --git a/camera/QCamera2/stack/mm-jpeg-interface/test/mm_jpegdec_test.c b/camera/QCamera2/stack/mm-jpeg-interface/test/mm_jpegdec_test.c
new file mode 100644
index 0000000..772cd0f
--- /dev/null
+++ b/camera/QCamera2/stack/mm-jpeg-interface/test/mm_jpegdec_test.c
@@ -0,0 +1,471 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include "mm_jpeg_interface.h"
+#include "mm_jpeg_ionbuf.h"
+#include <sys/time.h>
+#include <stdlib.h>
+
+#define MIN(a,b)  (((a) < (b)) ? (a) : (b))
+#define MAX(a,b)  (((a) > (b)) ? (a) : (b))
+#define CLAMP(x, min, max) MIN(MAX((x), (min)), (max))
+
+#define TIME_IN_US(r) ((uint64_t)r.tv_sec * 1000000LL + (uint64_t)r.tv_usec)
+struct timeval dtime[2];
+
+
+/** DUMP_TO_FILE:
+ *  @filename: file name
+ *  @p_addr: address of the buffer
+ *  @len: buffer length
+ *
+ *  dump the image to the file
+ **/
+#define DUMP_TO_FILE(filename, p_addr, len) ({ \
+  size_t rc = 0; \
+  FILE *fp = fopen(filename, "w+"); \
+  if (fp) { \
+    rc = fwrite(p_addr, 1, len, fp); \
+    fclose(fp); \
+  } else { \
+    CDBG_ERROR("%s:%d] cannot dump image", __func__, __LINE__); \
+  } \
+})
+
+static int g_count = 1, g_i;
+
+typedef struct {
+  char *filename;
+  int width;
+  int height;
+  char *out_filename;
+  int format;
+} jpeg_test_input_t;
+
+typedef struct {
+  char *filename;
+  int width;
+  int height;
+  char *out_filename;
+  pthread_mutex_t lock;
+  pthread_cond_t cond;
+  buffer_t input;
+  buffer_t output;
+  int use_ion;
+  uint32_t handle;
+  mm_jpegdec_ops_t ops;
+  uint32_t job_id[5];
+  mm_jpeg_decode_params_t params;
+  mm_jpeg_job_t job;
+  uint32_t session_id;
+} mm_jpegdec_intf_test_t;
+
+typedef struct {
+  char *format_str;
+  int eColorFormat;
+} mm_jpegdec_col_fmt_t;
+
+#define ARR_SZ(a) (sizeof(a)/sizeof(a[0]))
+
+static const mm_jpegdec_col_fmt_t col_formats[] =
+{
+  { "YCRCBLP_H2V2",      (int)MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V2 },
+  { "YCBCRLP_H2V2",      (int)MM_JPEG_COLOR_FORMAT_YCBCRLP_H2V2 },
+  { "YCRCBLP_H2V1",      (int)MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V1 },
+  { "YCBCRLP_H2V1",      (int)MM_JPEG_COLOR_FORMAT_YCBCRLP_H2V1 },
+  { "YCRCBLP_H1V2",      (int)MM_JPEG_COLOR_FORMAT_YCRCBLP_H1V2 },
+  { "YCBCRLP_H1V2",      (int)MM_JPEG_COLOR_FORMAT_YCBCRLP_H1V2 },
+  { "YCRCBLP_H1V1",      (int)MM_JPEG_COLOR_FORMAT_YCRCBLP_H1V1 },
+  { "YCBCRLP_H1V1",      (int)MM_JPEG_COLOR_FORMAT_YCBCRLP_H1V1 }
+};
+
+static void mm_jpegdec_decode_callback(jpeg_job_status_t status,
+  uint32_t client_hdl,
+  uint32_t jobId,
+  mm_jpeg_output_t *p_output,
+  void *userData)
+{
+  mm_jpegdec_intf_test_t *p_obj = (mm_jpegdec_intf_test_t *)userData;
+
+  if (status == JPEG_JOB_STATUS_ERROR) {
+    CDBG_ERROR("%s:%d] Decode error", __func__, __LINE__);
+  } else {
+    gettimeofday(&dtime[1], NULL);
+    CDBG_ERROR("%s:%d] Decode time %llu ms",
+     __func__, __LINE__, ((TIME_IN_US(dtime[1]) - TIME_IN_US(dtime[0]))/1000));
+
+    CDBG_ERROR("%s:%d] Decode success file%s addr %p len %zu",
+      __func__, __LINE__, p_obj->out_filename,
+      p_output->buf_vaddr, p_output->buf_filled_len);
+    DUMP_TO_FILE(p_obj->out_filename, p_output->buf_vaddr, p_output->buf_filled_len);
+  }
+  g_i++;
+  if (g_i >= g_count) {
+    CDBG_ERROR("%s:%d] Signal the thread", __func__, __LINE__);
+    pthread_cond_signal(&p_obj->cond);
+  }
+}
+
+int mm_jpegdec_test_alloc(buffer_t *p_buffer, int use_pmem)
+{
+  int ret = 0;
+  /*Allocate buffers*/
+  if (use_pmem) {
+    p_buffer->addr = (uint8_t *)buffer_allocate(p_buffer, 0);
+    if (NULL == p_buffer->addr) {
+      CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+      return -1;
+    }
+  } else {
+    /* Allocate heap memory */
+    p_buffer->addr = (uint8_t *)malloc(p_buffer->size);
+    if (NULL == p_buffer->addr) {
+      CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+      return -1;
+    }
+  }
+  return ret;
+}
+
+void mm_jpegdec_test_free(buffer_t *p_buffer)
+{
+  if (p_buffer->addr == NULL)
+    return;
+
+  if (p_buffer->p_pmem_fd >= 0)
+    buffer_deallocate(p_buffer);
+  else
+    free(p_buffer->addr);
+
+  memset(p_buffer, 0x0, sizeof(buffer_t));
+}
+
+int mm_jpegdec_test_read(mm_jpegdec_intf_test_t *p_obj)
+{
+  int rc = 0;
+  FILE *fp = NULL;
+  size_t file_size = 0;
+  fp = fopen(p_obj->filename, "rb");
+  if (!fp) {
+    CDBG_ERROR("%s:%d] error", __func__, __LINE__);
+    return -1;
+  }
+  fseek(fp, 0, SEEK_END);
+  file_size = (size_t)ftell(fp);
+  fseek(fp, 0, SEEK_SET);
+
+  CDBG_ERROR("%s:%d] input file size is %zu",
+    __func__, __LINE__, file_size);
+
+  p_obj->input.size = file_size;
+
+  /* allocate buffers */
+  rc = mm_jpegdec_test_alloc(&p_obj->input, p_obj->use_ion);
+  if (rc) {
+    CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+    return -1;
+  }
+
+  fread(p_obj->input.addr, 1, p_obj->input.size, fp);
+  fclose(fp);
+  return 0;
+}
+
+void chromaScale(mm_jpeg_color_format format, double *cScale)
+{
+  double scale;
+
+  switch(format) {
+    case MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V2:
+    case MM_JPEG_COLOR_FORMAT_YCBCRLP_H2V2:
+      scale = 1.5;
+      break;
+    case MM_JPEG_COLOR_FORMAT_YCRCBLP_H2V1:
+    case MM_JPEG_COLOR_FORMAT_YCBCRLP_H2V1:
+    case MM_JPEG_COLOR_FORMAT_YCRCBLP_H1V2:
+    case MM_JPEG_COLOR_FORMAT_YCBCRLP_H1V2:
+      scale = 2.0;
+      break;
+    case MM_JPEG_COLOR_FORMAT_YCRCBLP_H1V1:
+    case MM_JPEG_COLOR_FORMAT_YCBCRLP_H1V1:
+      scale = 3.0;
+      break;
+    case MM_JPEG_COLOR_FORMAT_MONOCHROME:
+      scale = 1.0;
+      break;
+    default:
+      scale = 0;
+      CDBG_ERROR("%s:%d] color format Error",__func__, __LINE__);
+    }
+
+  *cScale = scale;
+}
+
+static int decode_init(jpeg_test_input_t *p_input, mm_jpegdec_intf_test_t *p_obj)
+{
+  int rc = -1;
+  size_t size = (size_t)(CEILING16(p_input->width) * CEILING16(p_input->height));
+  double cScale;
+  mm_jpeg_decode_params_t *p_params = &p_obj->params;
+  mm_jpeg_decode_job_t *p_job_params = &p_obj->job.decode_job;
+
+  p_obj->filename = p_input->filename;
+  p_obj->width = p_input->width;
+  p_obj->height = p_input->height;
+  p_obj->out_filename = p_input->out_filename;
+  p_obj->use_ion = 1;
+
+  pthread_mutex_init(&p_obj->lock, NULL);
+  pthread_cond_init(&p_obj->cond, NULL);
+
+  chromaScale(p_input->format, &cScale);
+  p_obj->output.size = (size_t)((double)size * cScale);
+  rc = mm_jpegdec_test_alloc(&p_obj->output, p_obj->use_ion);
+  if (rc) {
+    CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+    return -1;
+  }
+
+  rc = mm_jpegdec_test_read(p_obj);
+  if (rc) {
+    CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+    return -1;
+  }
+
+  /* set encode parameters */
+  p_params->jpeg_cb = mm_jpegdec_decode_callback;
+  p_params->userdata = p_obj;
+  p_params->color_format = p_input->format;
+
+  /* dest buffer config */
+  p_params->dest_buf[0].buf_size = p_obj->output.size;
+  p_params->dest_buf[0].buf_vaddr = p_obj->output.addr;
+  p_params->dest_buf[0].fd = p_obj->output.p_pmem_fd;
+  p_params->dest_buf[0].format = MM_JPEG_FMT_YUV;
+  p_params->dest_buf[0].offset.mp[0].len = (uint32_t)size;
+  p_params->dest_buf[0].offset.mp[1].len =
+    (uint32_t)((double)size * (cScale - 1.0));
+  p_params->dest_buf[0].offset.mp[0].stride = CEILING16(p_input->width);
+  p_params->dest_buf[0].offset.mp[0].scanline = CEILING16(p_input->height);
+  p_params->dest_buf[0].offset.mp[1].stride = CEILING16(p_input->width);
+  p_params->dest_buf[0].offset.mp[1].scanline = CEILING16(p_input->height);
+  p_params->dest_buf[0].index = 0;
+  p_params->num_dst_bufs = 1;
+
+  /* src buffer config*/
+  p_params->src_main_buf[0].buf_size = p_obj->input.size;
+  p_params->src_main_buf[0].buf_vaddr = p_obj->input.addr;
+  p_params->src_main_buf[0].fd = p_obj->input.p_pmem_fd;
+  p_params->src_main_buf[0].index = 0;
+  p_params->src_main_buf[0].format = MM_JPEG_FMT_BITSTREAM;
+  /*
+  p_params->src_main_buf[0].offset.mp[0].len = size;
+  p_params->src_main_buf[0].offset.mp[1].len = size >> 1;
+  */
+  p_params->num_src_bufs = 1;
+
+  p_job_params->dst_index = 0;
+  p_job_params->src_index = 0;
+  p_job_params->rotation = 0;
+
+  /* main dimension */
+  p_job_params->main_dim.src_dim.width = p_obj->width;
+  p_job_params->main_dim.src_dim.height = p_obj->height;
+  p_job_params->main_dim.dst_dim.width = p_obj->width;
+  p_job_params->main_dim.dst_dim.height = p_obj->height;
+  p_job_params->main_dim.crop.top = 0;
+  p_job_params->main_dim.crop.left = 0;
+  p_job_params->main_dim.crop.width = p_obj->width;
+  p_job_params->main_dim.crop.height = p_obj->height;
+
+
+  return 0;
+}
+
+void omx_test_dec_print_usage()
+{
+  fprintf(stderr, "Usage: program_name [options]\n");
+  fprintf(stderr, "Mandatory options:\n");
+  fprintf(stderr, "  -I FILE\t\tPath to the input file.\n");
+  fprintf(stderr, "  -O FILE\t\tPath for the output file.\n");
+  fprintf(stderr, "  -W WIDTH\t\tOutput image width\n");
+  fprintf(stderr, "  -H HEIGHT\t\tOutput image height\n");
+  fprintf(stderr, "Optional:\n");
+  fprintf(stderr, "  -F FORMAT\t\tDefault image format:\n");
+  fprintf(stderr, "\t\t\t\t%s (0), %s (1), %s (2) %s (3)\n"
+    "%s (4), %s (5), %s (6) %s (7)\n",
+    col_formats[0].format_str, col_formats[1].format_str,
+    col_formats[2].format_str, col_formats[3].format_str,
+    col_formats[4].format_str, col_formats[5].format_str,
+    col_formats[6].format_str, col_formats[7].format_str
+    );
+
+  fprintf(stderr, "\n");
+}
+
+static int mm_jpegdec_test_get_input(int argc, char *argv[],
+    jpeg_test_input_t *p_test)
+{
+  int c;
+
+  while ((c = getopt(argc, argv, "I:O:W:H:F:")) != -1) {
+    switch (c) {
+    case 'O':
+      p_test->out_filename = optarg;
+      fprintf(stderr, "%-25s%s\n", "Output image path",
+        p_test->out_filename);
+      break;
+    case 'I':
+      p_test->filename = optarg;
+      fprintf(stderr, "%-25s%s\n", "Input image path", p_test->filename);
+      break;
+    case 'W':
+      p_test->width = atoi(optarg);
+      fprintf(stderr, "%-25s%d\n", "Default width", p_test->width);
+      break;
+    case 'H':
+      p_test->height = atoi(optarg);
+      fprintf(stderr, "%-25s%d\n", "Default height", p_test->height);
+      break;
+    case 'F': {
+      int format = 0;
+      format = atoi(optarg);
+      int num_formats = ARR_SZ(col_formats);
+      format = CLAMP(format, 0, num_formats);
+      p_test->format = col_formats[format].eColorFormat;
+      fprintf(stderr, "%-25s%s\n", "Default image format",
+        col_formats[format].format_str);
+      break;
+    }
+    default:;
+    }
+  }
+  if (!p_test->filename || !p_test->out_filename || !p_test->width ||
+      !p_test->height) {
+    fprintf(stderr, "Missing required arguments.\n");
+    omx_test_dec_print_usage();
+    return -1;
+  }
+  return 0;
+}
+
+static int decode_test(jpeg_test_input_t *p_input)
+{
+  int rc = 0;
+  mm_jpegdec_intf_test_t jpeg_obj;
+  int i = 0;
+
+  memset(&jpeg_obj, 0x0, sizeof(jpeg_obj));
+  rc = decode_init(p_input, &jpeg_obj);
+  if (rc) {
+    CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+    return -1;
+  }
+
+  jpeg_obj.handle = jpegdec_open(&jpeg_obj.ops);
+  if (jpeg_obj.handle == 0) {
+    CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+    goto end;
+  }
+
+  rc = jpeg_obj.ops.create_session(jpeg_obj.handle, &jpeg_obj.params,
+    &jpeg_obj.job.decode_job.session_id);
+  if (jpeg_obj.job.decode_job.session_id == 0) {
+    CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+    goto end;
+  }
+
+  for (i = 0; i < g_count; i++) {
+    jpeg_obj.job.job_type = JPEG_JOB_TYPE_DECODE;
+
+    CDBG_ERROR("%s:%d] Starting decode job",__func__, __LINE__);
+    gettimeofday(&dtime[0], NULL);
+
+    fprintf(stderr, "Starting decode of %s into %s outw %d outh %d\n\n",
+        p_input->filename, p_input->out_filename,
+        p_input->width, p_input->height);
+    rc = jpeg_obj.ops.start_job(&jpeg_obj.job, &jpeg_obj.job_id[i]);
+    if (rc) {
+      CDBG_ERROR("%s:%d] Error",__func__, __LINE__);
+      goto end;
+    }
+  }
+
+  /*
+  usleep(5);
+  jpeg_obj.ops.abort_job(jpeg_obj.job_id[0]);
+  */
+  pthread_mutex_lock(&jpeg_obj.lock);
+  pthread_cond_wait(&jpeg_obj.cond, &jpeg_obj.lock);
+  pthread_mutex_unlock(&jpeg_obj.lock);
+
+  fprintf(stderr, "Decode time %llu ms\n",
+      ((TIME_IN_US(dtime[1]) - TIME_IN_US(dtime[0]))/1000));
+
+
+  jpeg_obj.ops.destroy_session(jpeg_obj.job.decode_job.session_id);
+
+  jpeg_obj.ops.close(jpeg_obj.handle);
+
+
+end:
+  mm_jpegdec_test_free(&jpeg_obj.input);
+  mm_jpegdec_test_free(&jpeg_obj.output);
+  return 0;
+}
+
+/** main:
+ *
+ *  Arguments:
+ *    @argc
+ *    @argv
+ *
+ *  Return:
+ *       0 or -ve values
+ *
+ *  Description:
+ *       main function
+ *
+ **/
+int main(int argc, char* argv[])
+{
+  jpeg_test_input_t dec_test_input;
+  int ret;
+
+  memset(&dec_test_input, 0, sizeof(dec_test_input));
+  ret = mm_jpegdec_test_get_input(argc, argv, &dec_test_input);
+
+  if (ret) {
+    return -1;
+  }
+
+  return decode_test(&dec_test_input);
+}
+
+
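Note: decode_init() sizes the output buffer as CEILING16(width) * CEILING16(height) * cScale, where chromaScale() maps the color format to 1.5 (H2V2), 2.0 (H2V1/H1V2), 3.0 (H1V1) or 1.0 (monochrome); the luma plane gets the CEILING16(w) * CEILING16(h) bytes and the remaining (cScale - 1.0) share goes to the chroma plane. As a worked example with hypothetical dimensions, a 1920x1080 H2V2 decode needs 1920 * 1088 * 1.5 = 3,133,440 bytes, split 2,088,960 / 1,044,480 between the two planes.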
diff --git a/camera/QCamera2/util/QCameraCmdThread.cpp b/camera/QCamera2/util/QCameraCmdThread.cpp
new file mode 100644
index 0000000..ef9c74e
--- /dev/null
+++ b/camera/QCamera2/util/QCameraCmdThread.cpp
@@ -0,0 +1,216 @@
+/* Copyright (c) 2012-2015, The Linux Foundataion. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#include <utils/Errors.h>
+#include <utils/Log.h>
+#include <sys/prctl.h>
+#include "QCameraCmdThread.h"
+
+using namespace android;
+
+namespace qcamera {
+
+/*===========================================================================
+ * FUNCTION   : QCameraCmdThread
+ *
+ * DESCRIPTION: default constructor of QCameraCmdThread
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraCmdThread::QCameraCmdThread() :
+    cmd_queue()
+{
+    cmd_pid = 0;
+    cam_sem_init(&sync_sem, 0);
+    cam_sem_init(&cmd_sem, 0);
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCameraCmdThread
+ *
+ * DESCRIPTION: destructor of QCameraCmdThread
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraCmdThread::~QCameraCmdThread()
+{
+    cam_sem_destroy(&sync_sem);
+    cam_sem_destroy(&cmd_sem);
+}
+
+/*===========================================================================
+ * FUNCTION   : launch
+ *
+ * DESCRIPTION: launch Cmd Thread
+ *
+ * PARAMETERS :
+ *   @start_routine : thread routine function ptr
+ *   @user_data     : user data ptr
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraCmdThread::launch(void *(*start_routine)(void *),
+                                 void* user_data)
+{
+    /* launch the thread */
+    pthread_create(&cmd_pid,
+                   NULL,
+                   start_routine,
+                   user_data);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : setName
+ *
+ * DESCRIPTION: name the cmd thread
+ *
+ * PARAMETERS :
+ *   @name : desired name for the thread
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraCmdThread::setName(const char* name)
+{
+    /* name the thread */
+    prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0);
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : sendCmd
+ *
+ * DESCRIPTION: send a command to the Cmd Thread
+ *
+ * PARAMETERS :
+ *   @cmd     : command to be executed.
+ *   @sync_cmd: flag to indicate if this is a synchronized cmd. If true, this call
+ *              will wait until the signal is set after the command is completed.
+ *   @priority: flag to indicate if this is a cmd with priority. If true, the cmd
+ *              will be enqueued to the head with priority.
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraCmdThread::sendCmd(camera_cmd_type_t cmd, uint8_t sync_cmd, uint8_t priority)
+{
+    camera_cmd_t *node = (camera_cmd_t *)malloc(sizeof(camera_cmd_t));
+    if (NULL == node) {
+        ALOGE("%s: No memory for camera_cmd_t", __func__);
+        return NO_MEMORY;
+    }
+    memset(node, 0, sizeof(camera_cmd_t));
+    node->cmd = cmd;
+
+    if (priority) {
+        if (!cmd_queue.enqueueWithPriority((void *)node)) {
+            free(node);
+            node = NULL;
+        }
+    } else {
+        if (!cmd_queue.enqueue((void *)node)) {
+            free(node);
+            node = NULL;
+        }
+    }
+    cam_sem_post(&cmd_sem);
+
+    /* if is a sync call, need to wait until it returns */
+    if (sync_cmd) {
+        cam_sem_wait(&sync_sem);
+    }
+    return NO_ERROR;
+}
+
+/*===========================================================================
+ * FUNCTION   : getCmd
+ *
+ * DESCRIPTION: dequeue a command from the cmd queue
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : cmd dequeued
+ *==========================================================================*/
+camera_cmd_type_t QCameraCmdThread::getCmd()
+{
+    camera_cmd_type_t cmd = CAMERA_CMD_TYPE_NONE;
+    camera_cmd_t *node = (camera_cmd_t *)cmd_queue.dequeue();
+    if (NULL == node) {
+        ALOGD("%s: No notify avail", __func__);
+        return CAMERA_CMD_TYPE_NONE;
+    } else {
+        cmd = node->cmd;
+        free(node);
+    }
+    return cmd;
+}
+
+/*===========================================================================
+ * FUNCTION   : exit
+ *
+ * DESCRIPTION: exit the CMD thread
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *==========================================================================*/
+int32_t QCameraCmdThread::exit()
+{
+    int32_t rc = NO_ERROR;
+
+    if (cmd_pid == 0) {
+        return rc;
+    }
+
+    rc = sendCmd(CAMERA_CMD_TYPE_EXIT, 0, 1);
+    if (NO_ERROR != rc) {
+        ALOGE("%s: Error during exit, rc = %d", __func__, rc);
+        return rc;
+    }
+
+    /* wait until cmd thread exits */
+    if (pthread_join(cmd_pid, NULL) != 0) {
+        ALOGD("%s: pthread dead already\n", __func__);
+    }
+    cmd_pid = 0;
+    return rc;
+}
+
+}; // namespace qcamera
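Note: QCameraCmdThread is only the plumbing; the actual thread body is supplied by the owner via launch(). A hypothetical consumer routine, sketched purely from the contract above (the thread name and the DO_NEXT_JOB handling are placeholders, not the HAL's real routine):

    using namespace qcamera;

    static void *cmd_thread_routine(void *data)
    {
        QCameraCmdThread *t = (QCameraCmdThread *)data;
        int running = 1;

        t->setName("CAM_cmdThread");        /* prctl() names the calling thread */
        while (running) {
            cam_sem_wait(&t->cmd_sem);      /* one post per sendCmd() */
            switch (t->getCmd()) {
            case CAMERA_CMD_TYPE_DO_NEXT_JOB:
                /* drain the owner's work queue here */
                break;
            case CAMERA_CMD_TYPE_EXIT:
                running = 0;
                break;
            default:
                break;
            }
        }
        return NULL;
    }

The owner would then call launch(cmd_thread_routine, &cmdThread), post work with sendCmd(CAMERA_CMD_TYPE_DO_NEXT_JOB, 0, 0), and tear the thread down with exit(), which enqueues CAMERA_CMD_TYPE_EXIT with priority and joins.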
diff --git a/camera/QCamera2/util/QCameraCmdThread.h b/camera/QCamera2/util/QCameraCmdThread.h
new file mode 100644
index 0000000..a9511dc
--- /dev/null
+++ b/camera/QCamera2/util/QCameraCmdThread.h
@@ -0,0 +1,74 @@
+/* Copyright (c) 2012, The Linux Foundataion. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCAMERA_CMD_THREAD_H__
+#define __QCAMERA_CMD_THREAD_H__
+
+#include <pthread.h>
+#include <cam_semaphore.h>
+
+#include "cam_types.h"
+#include "QCameraQueue.h"
+
+namespace qcamera {
+
+typedef enum
+{
+    CAMERA_CMD_TYPE_NONE,
+    CAMERA_CMD_TYPE_START_DATA_PROC,
+    CAMERA_CMD_TYPE_STOP_DATA_PROC,
+    CAMERA_CMD_TYPE_DO_NEXT_JOB,
+    CAMERA_CMD_TYPE_EXIT,
+    CAMERA_CMD_TYPE_MAX
+} camera_cmd_type_t;
+
+typedef struct {
+    camera_cmd_type_t cmd;
+} camera_cmd_t;
+
+class QCameraCmdThread {
+public:
+    QCameraCmdThread();
+    ~QCameraCmdThread();
+
+    int32_t launch(void *(*start_routine)(void *), void* user_data);
+    int32_t setName(const char* name);
+    int32_t exit();
+    int32_t sendCmd(camera_cmd_type_t cmd, uint8_t sync_cmd, uint8_t priority);
+    camera_cmd_type_t getCmd();
+
+    QCameraQueue cmd_queue;      /* cmd queue */
+    pthread_t cmd_pid;           /* cmd thread ID */
+    cam_semaphore_t cmd_sem;               /* semaphore for cmd thread */
+    cam_semaphore_t sync_sem;              /* semaphore for synchronized call signal */
+};
+
+}; // namespace qcamera
+
+#endif /* __QCAMERA_CMD_THREAD_H__ */
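Note: when sync_cmd is non-zero, sendCmd() blocks on sync_sem, and nothing inside this class ever posts sync_sem itself; the consumer routine is expected to do so once the command has been handled. A hedged sketch of that hand-off, extending the switch in the routine shown after QCameraCmdThread.cpp above:

        case CAMERA_CMD_TYPE_STOP_DATA_PROC:
            /* ... flush or finish any pending work ... */
            cam_sem_post(&t->sync_sem);   /* releases a sendCmd(cmd, 1, x) caller */
            break;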
diff --git a/camera/QCamera2/util/QCameraFlash.cpp b/camera/QCamera2/util/QCameraFlash.cpp
new file mode 100644
index 0000000..2ca8fb4
--- /dev/null
+++ b/camera/QCamera2/util/QCameraFlash.cpp
@@ -0,0 +1,419 @@
+/* Copyright (c) 2015, The Linux Foundataion. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#include <stdio.h>
+#include <fcntl.h>
+#include <linux/media.h>
+#include <media/msmb_camera.h>
+#include <media/msm_cam_sensor.h>
+#include <utils/Log.h>
+
+#include "HAL3/QCamera3HWI.h"
+#include "QCameraFlash.h"
+
+#define STRING_LENGTH_OF_64_BIT_NUMBER 21
+
+volatile uint32_t gCamHal3LogLevel = 1;
+
+namespace qcamera {
+
+/*===========================================================================
+ * FUNCTION   : getInstance
+ *
+ * DESCRIPTION: Return the QCameraFlash singleton, creating it on first use.
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : reference to the QCameraFlash singleton
+ *==========================================================================*/
+QCameraFlash& QCameraFlash::getInstance()
+{
+    static QCameraFlash flashInstance;
+    return flashInstance;
+}
+
+/*===========================================================================
+ * FUNCTION   : QCameraFlash
+ *
+ * DESCRIPTION: default constructor of QCameraFlash
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraFlash::QCameraFlash() : m_callbacks(NULL)
+{
+    memset(&m_flashOn, 0, sizeof(m_flashOn));
+    memset(&m_cameraOpen, 0, sizeof(m_cameraOpen));
+    for (int pos = 0; pos < MM_CAMERA_MAX_NUM_SENSORS; pos++) {
+        m_flashFds[pos] = -1;
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCameraFlash
+ *
+ * DESCRIPTION: destructor of QCameraFlash
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraFlash::~QCameraFlash()
+{
+    for (int pos = 0; pos < MM_CAMERA_MAX_NUM_SENSORS; pos++) {
+        if (m_flashFds[pos] >= 0)
+            {
+                setFlashMode(pos, false);
+                close(m_flashFds[pos]);
+                m_flashFds[pos] = -1;
+            }
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : registerCallbacks
+ *
+ * DESCRIPTION: provide the flash module with a reference to the framework callbacks
+ *
+ * PARAMETERS :
+ *   @callbacks : framework callback table to store
+ *
+ * RETURN     : int32_t type of status, 0 on success
+ *==========================================================================*/
+int32_t QCameraFlash::registerCallbacks(
+        const camera_module_callbacks_t* callbacks)
+{
+    int32_t retVal = 0;
+    m_callbacks = callbacks;
+    return retVal;
+}
+
+/*===========================================================================
+ * FUNCTION   : initFlash
+ *
+ * DESCRIPTION: Reserve and initialize the flash unit associated with a
+ *              given camera id. This function is blocking until the
+ *              operation completes or fails. Each flash unit can be "inited"
+ *              by only one process at a time.
+ *
+ * PARAMETERS :
+ *   @camera_id : Camera id of the flash.
+ *
+ * RETURN     :
+ *   0        : success
+ *   -EBUSY   : The flash unit or the resource needed to turn on
+ *              the flash is busy, typically because the flash is
+ *              already in use.
+ *   -EINVAL  : No flash present at camera_id.
+ *==========================================================================*/
+int32_t QCameraFlash::initFlash(const int camera_id)
+{
+    int32_t retVal = 0;
+    bool hasFlash = false;
+    char flashNode[QCAMERA_MAX_FILEPATH_LENGTH];
+    char flashPath[QCAMERA_MAX_FILEPATH_LENGTH] = "/dev/";
+
+    if (camera_id < 0 || camera_id >= MM_CAMERA_MAX_NUM_SENSORS) {
+        ALOGE("%s: Invalid camera id: %d", __func__, camera_id);
+        return -EINVAL;
+    }
+
+    QCamera3HardwareInterface::getFlashInfo(camera_id,
+            hasFlash,
+            flashNode);
+
+    strlcat(flashPath,
+            flashNode,
+            sizeof(flashPath));
+
+    if (!hasFlash) {
+        ALOGE("%s: No flash available for camera id: %d",
+                __func__,
+                camera_id);
+        retVal = -EINVAL;
+    } else if (m_cameraOpen[camera_id]) {
+        ALOGE("%s: Camera in use for camera id: %d",
+                __func__,
+                camera_id);
+        retVal = -EBUSY;
+    } else if (m_flashFds[camera_id] >= 0) {
+        CDBG("%s: Flash is already inited for camera id: %d",
+                __func__,
+                camera_id);
+    } else {
+        m_flashFds[camera_id] = open(flashPath, O_RDWR | O_NONBLOCK);
+
+        if (m_flashFds[camera_id] < 0) {
+            ALOGE("%s: Unable to open node '%s'",
+                    __func__,
+                    flashPath);
+            retVal = -EBUSY;
+        } else {
+            struct msm_flash_cfg_data_t cfg;
+            struct msm_flash_init_info_t init_info;
+            memset(&cfg, 0, sizeof(struct msm_flash_cfg_data_t));
+            memset(&init_info, 0, sizeof(struct msm_flash_init_info_t));
+            init_info.flash_driver_type = FLASH_DRIVER_DEFAULT;
+            cfg.cfg.flash_init_info = &init_info;
+            cfg.cfg_type = CFG_FLASH_INIT;
+            retVal = ioctl(m_flashFds[camera_id],
+                    VIDIOC_MSM_FLASH_CFG,
+                    &cfg);
+            if (retVal < 0) {
+                ALOGE("%s: Unable to init flash for camera id: %d",
+                        __func__,
+                        camera_id);
+                close(m_flashFds[camera_id]);
+                m_flashFds[camera_id] = -1;
+            }
+
+            /* wait for PMIC to init */
+            usleep(5000);
+        }
+    }
+
+    CDBG("%s: X, retVal = %d", __func__, retVal);
+    return retVal;
+}
+
+/*===========================================================================
+ * FUNCTION   : setFlashMode
+ *
+ * DESCRIPTION: Turn on or off the flash associated with a given camera id.
+ *              This function is blocking until the operation completes or
+ *              fails.
+ *
+ * PARAMETERS :
+ *   @camera_id  : Camera id of the flash
+ *   @on         : Whether to turn flash on (true) or off (false)
+ *
+ * RETURN     :
+ *   0        : success
+ *   -EINVAL  : No camera present at camera_id, or it is not inited.
+ *   -EALREADY: Flash is already in requested state
+ *==========================================================================*/
+int32_t QCameraFlash::setFlashMode(const int camera_id, const bool mode)
+{
+    int32_t retVal = 0;
+    struct msm_flash_cfg_data_t cfg;
+
+    if (camera_id < 0 || camera_id >= MM_CAMERA_MAX_NUM_SENSORS) {
+        ALOGE("%s: Invalid camera id: %d", __func__, camera_id);
+        retVal = -EINVAL;
+    } else if (mode == m_flashOn[camera_id]) {
+        CDBG("%s: flash %d is already in requested state: %d",
+                __func__,
+                camera_id,
+                mode);
+        retVal = -EALREADY;
+    } else if (m_flashFds[camera_id] < 0) {
+        ALOGE("%s: called for uninited flash: %d", __func__, camera_id);
+        retVal = -EINVAL;
+    } else {
+        memset(&cfg, 0, sizeof(struct msm_flash_cfg_data_t));
+        for (int i = 0; i < MAX_LED_TRIGGERS; i++)
+            cfg.flash_current[i] = QCAMERA_TORCH_CURRENT_VALUE;
+        cfg.cfg_type = mode ? CFG_FLASH_LOW : CFG_FLASH_OFF;
+
+        retVal = ioctl(m_flashFds[camera_id],
+                        VIDIOC_MSM_FLASH_CFG,
+                        &cfg);
+        if (retVal < 0)
+            ALOGE("%s: Unable to change flash mode to %d for camera id: %d",
+                    __func__, mode, camera_id);
+        else
+            m_flashOn[camera_id] = mode;
+    }
+    return retVal;
+}
+
+/*===========================================================================
+ * FUNCTION   : deinitFlash
+ *
+ * DESCRIPTION: Release the flash unit associated with a given camera
+ *              position. This function is blocking until the operation
+ *              completes or fails.
+ *
+ * PARAMETERS :
+ *   @camera_id : Camera id of the flash.
+ *
+ * RETURN     :
+ *   0        : success
+ *   -EINVAL  : No camera present at camera_id or not inited.
+ *==========================================================================*/
+int32_t QCameraFlash::deinitFlash(const int camera_id)
+{
+    int32_t retVal = 0;
+
+    if (camera_id < 0 || camera_id >= MM_CAMERA_MAX_NUM_SENSORS) {
+        ALOGE("%s: Invalid camera id: %d", __func__, camera_id);
+        retVal = -EINVAL;
+    } else if (m_flashFds[camera_id] < 0) {
+        ALOGE("%s: called deinitFlash for uninited flash", __func__);
+        retVal = -EINVAL;
+    } else {
+        setFlashMode(camera_id, false);
+
+        struct msm_flash_cfg_data_t cfg;
+        cfg.cfg_type = CFG_FLASH_RELEASE;
+        retVal = ioctl(m_flashFds[camera_id],
+                VIDIOC_MSM_FLASH_CFG,
+                &cfg);
+        if (retVal < 0) {
+            ALOGE("%s: Failed to release flash for camera id: %d",
+                    __func__,
+                    camera_id);
+        }
+
+        close(m_flashFds[camera_id]);
+        m_flashFds[camera_id] = -1;
+    }
+
+    return retVal;
+}
+
+/*===========================================================================
+ * FUNCTION   : reserveFlashForCamera
+ *
+ * DESCRIPTION: Give control of the flash to the camera, and notify
+ *              framework that the flash has become unavailable.
+ *
+ * PARAMETERS :
+ *   @camera_id : Camera id of the flash.
+ *
+ * RETURN     :
+ *   0        : success
+ *   -EINVAL  : No camera present at camera_id or not inited.
+ *   -ENOSYS  : No callback available for torch_mode_status_change.
+ *==========================================================================*/
+int32_t QCameraFlash::reserveFlashForCamera(const int camera_id)
+{
+    int32_t retVal = 0;
+
+    if (camera_id < 0 || camera_id >= MM_CAMERA_MAX_NUM_SENSORS) {
+        ALOGE("%s: Invalid camera id: %d", __func__, camera_id);
+        retVal = -EINVAL;
+    } else if (m_cameraOpen[camera_id]) {
+        CDBG("%s: Flash already reserved for camera id: %d",
+                __func__,
+                camera_id);
+    } else {
+        if (m_flashOn[camera_id]) {
+            setFlashMode(camera_id, false);
+            deinitFlash(camera_id);
+        }
+        m_cameraOpen[camera_id] = true;
+
+        bool hasFlash = false;
+        char flashNode[QCAMERA_MAX_FILEPATH_LENGTH];
+
+        QCamera3HardwareInterface::getFlashInfo(camera_id,
+                hasFlash,
+                flashNode);
+
+        if (m_callbacks == NULL ||
+                m_callbacks->torch_mode_status_change == NULL) {
+            ALOGE("%s: Callback is not defined!", __func__);
+            retVal = -ENOSYS;
+        } else if (!hasFlash) {
+            CDBG("%s: Suppressing callback "
+                    "because no flash exists for camera id: %d",
+                    __func__,
+                    camera_id);
+        } else {
+            char cameraIdStr[STRING_LENGTH_OF_64_BIT_NUMBER];
+            snprintf(cameraIdStr, STRING_LENGTH_OF_64_BIT_NUMBER,
+                    "%d", camera_id);
+            m_callbacks->torch_mode_status_change(m_callbacks,
+                    cameraIdStr,
+                    TORCH_MODE_STATUS_NOT_AVAILABLE);
+        }
+    }
+
+    return retVal;
+}
+
+/*===========================================================================
+ * FUNCTION   : releaseFlashFromCamera
+ *
+ * DESCRIPTION: Release control of the flash from the camera, and notify
+ *              framework that the flash has become available.
+ *
+ * PARAMETERS :
+ *   @camera_id : Camera id of the flash.
+ *
+ * RETURN     :
+ *   0        : success
+ *   -EINVAL  : No camera present at camera_id or not inited.
+ *   -ENOSYS  : No callback available for torch_mode_status_change.
+ *==========================================================================*/
+int32_t QCameraFlash::releaseFlashFromCamera(const int camera_id)
+{
+    int32_t retVal = 0;
+
+    if (camera_id < 0 || camera_id >= MM_CAMERA_MAX_NUM_SENSORS) {
+        ALOGE("%s: Invalid camera id: %d", __func__, camera_id);
+        retVal = -EINVAL;
+    } else if (!m_cameraOpen[camera_id]) {
+        CDBG("%s: Flash not reserved for camera id: %d",
+                __func__,
+                camera_id);
+    } else {
+        m_cameraOpen[camera_id] = false;
+
+        bool hasFlash = false;
+        char flashNode[QCAMERA_MAX_FILEPATH_LENGTH];
+
+        QCamera3HardwareInterface::getFlashInfo(camera_id,
+                hasFlash,
+                flashNode);
+
+        if (m_callbacks == NULL ||
+                m_callbacks->torch_mode_status_change == NULL) {
+            ALOGE("%s: Callback is not defined!", __func__);
+            retVal = -ENOSYS;
+        } else if (!hasFlash) {
+            CDBG("%s: Suppressing callback "
+                    "because no flash exists for camera id: %d",
+                    __func__,
+                    camera_id);
+        } else {
+            char cameraIdStr[STRING_LENGTH_OF_64_BIT_NUMBER];
+            snprintf(cameraIdStr, STRING_LENGTH_OF_64_BIT_NUMBER,
+                    "%d", camera_id);
+            m_callbacks->torch_mode_status_change(m_callbacks,
+                    cameraIdStr,
+                    TORCH_MODE_STATUS_AVAILABLE_OFF);
+        }
+    }
+
+    return retVal;
+}
+
+}; // namespace qcamera
diff --git a/camera/QCamera2/util/QCameraFlash.h b/camera/QCamera2/util/QCameraFlash.h
new file mode 100755
index 0000000..16cab03
--- /dev/null
+++ b/camera/QCamera2/util/QCameraFlash.h
@@ -0,0 +1,68 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCAMERA_FLASH_H__
+#define __QCAMERA_FLASH_H__
+
+#include <hardware/camera_common.h>
+
+extern "C" {
+#include <mm_camera_interface.h>
+}
+
+namespace qcamera {
+
+#define QCAMERA_TORCH_CURRENT_VALUE 200
+
+class QCameraFlash {
+public:
+    static QCameraFlash& getInstance();
+
+    int32_t registerCallbacks(const camera_module_callbacks_t* callbacks);
+    int32_t initFlash(const int camera_id);
+    int32_t setFlashMode(const int camera_id, const bool on);
+    int32_t deinitFlash(const int camera_id);
+    int32_t reserveFlashForCamera(const int camera_id);
+    int32_t releaseFlashFromCamera(const int camera_id);
+
+private:
+    QCameraFlash();
+    virtual ~QCameraFlash();
+    QCameraFlash(const QCameraFlash&);
+    QCameraFlash& operator=(const QCameraFlash&);
+
+    const camera_module_callbacks_t *m_callbacks;
+    int32_t m_flashFds[MM_CAMERA_MAX_NUM_SENSORS];
+    bool m_flashOn[MM_CAMERA_MAX_NUM_SENSORS];
+    bool m_cameraOpen[MM_CAMERA_MAX_NUM_SENSORS];
+};
+
+}; // namespace qcamera
+
+#endif /* __QCAMERA_FLASH_H__ */
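
The QCameraFlash singleton declared above is what the camera module's torch control is expected to drive. As a rough, illustrative sketch only (the wrapper function and its error handling below are assumptions, not code from this patch), a set_torch_mode-style entry point could use it like this:

    // Illustrative sketch only -- not part of the patch above.
    // Assumes QCameraFlash.h from this diff; error codes come from <cerrno>.
    #include <cerrno>
    #include "QCameraFlash.h"

    using namespace qcamera;

    // Hypothetical helper: toggle the torch of camera `camera_id` while no
    // capture session holds the flash (otherwise initFlash() returns -EBUSY).
    static int32_t toggle_torch_sketch(int camera_id, bool enabled)
    {
        QCameraFlash &flash = QCameraFlash::getInstance();

        int32_t rc = flash.initFlash(camera_id);  // opens the flash node, CFG_FLASH_INIT
        if (rc != 0) {
            return rc;                            // -EBUSY or -EINVAL per initFlash() above
        }

        rc = flash.setFlashMode(camera_id, enabled);
        if (rc == -EALREADY) {
            rc = 0;                               // already in the requested state
        }

        if (!enabled) {
            flash.deinitFlash(camera_id);         // CFG_FLASH_RELEASE and close the node
        }
        return rc;
    }

reserveFlashForCamera() and releaseFlashFromCamera() are the companion calls made around camera open/close so the framework's torch_mode_status_change callback sees the flash become unavailable and available again.
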
diff --git a/camera/QCamera2/util/QCameraPerf.cpp b/camera/QCamera2/util/QCameraPerf.cpp
new file mode 100644
index 0000000..d082c83
--- /dev/null
+++ b/camera/QCamera2/util/QCameraPerf.cpp
@@ -0,0 +1,474 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#define LOG_TAG "QCameraPerf"
+
+#include <cutils/properties.h>
+#include <stdlib.h>
+#include <utils/Log.h>
+#include "QCameraPerf.h"
+
+#ifdef CDBG
+#undef CDBG
+#endif //#ifdef CDBG
+#define CDBG(fmt, args...) ALOGD_IF(gCamHalLogLevel >= 2, fmt, ##args)
+
+#ifdef CDBG_HIGH
+#undef CDBG_HIGH
+#endif //#ifdef CDBG_HIGH
+#define CDBG_HIGH(fmt, args...) ALOGD_IF(gCamHalLogLevel >= 1, fmt, ##args)
+
+
+namespace qcamera {
+
+extern volatile uint32_t gCamHalLogLevel;
+
+/*===========================================================================
+ * FUNCTION   : QCameraPerfLock constructor
+ *
+ * DESCRIPTION: initialize member variables
+ *
+ * PARAMETERS :
+ *   None
+ *
+ * RETURN     : void
+ *
+ *==========================================================================*/
+QCameraPerfLock::QCameraPerfLock() :
+        perf_lock_acq(NULL),
+        perf_lock_rel(NULL),
+        mDlHandle(NULL),
+        mPerfLockEnable(0),
+        mPerfLockHandle(-1),
+        mPerfLockHandleTimed(-1),
+        mTimerSet(0),
+        mPerfLockTimeout(0),
+        mStartTimeofLock(0)
+{
+}
+
+/*===========================================================================
+ * FUNCTION   : QCameraPerfLock destructor
+ *
+ * DESCRIPTION: class destructor
+ *
+ * PARAMETERS :
+ *   None
+ *
+ * RETURN     : void
+ *
+ *==========================================================================*/
+QCameraPerfLock::~QCameraPerfLock()
+{
+    lock_deinit();
+}
+
+
+/*===========================================================================
+ * FUNCTION   : lock_init
+ *
+ * DESCRIPTION: opens the performance library and initializes the perf lock functions
+ *
+ * PARAMETERS :
+ *   None
+ *
+ * RETURN     : void
+ *
+ *==========================================================================*/
+void QCameraPerfLock::lock_init()
+{
+    const char *rc;
+    char value[PROPERTY_VALUE_MAX];
+
+    CDBG("%s E", __func__);
+    Mutex::Autolock lock(mLock);
+
+    property_get("persist.camera.perflock.enable", value, "1");
+    mPerfLockEnable = atoi(value);
+    mCurrentPowerHintEnable = 0;
+#ifdef HAS_MULTIMEDIA_HINTS
+    if (hw_get_module(POWER_HARDWARE_MODULE_ID, (const hw_module_t **)&m_pPowerModule)) {
+        ALOGE("%s: %s module not found", __func__, POWER_HARDWARE_MODULE_ID);
+    }
+#endif
+
+    if (mPerfLockEnable) {
+        perf_lock_acq = NULL;
+        perf_lock_rel = NULL;
+        mPerfLockHandle = -1;
+        /* Retrieve name of vendor extension library */
+        if (property_get("ro.vendor.extension_library", value, NULL) <= 0) {
+            goto cleanup;
+        }
+
+        mDlHandle = dlopen(value, RTLD_NOW | RTLD_LOCAL);
+        if (mDlHandle == NULL) {
+            goto cleanup;
+        }
+
+        dlerror();
+
+        perf_lock_acq = (int (*) (int, int, int[], int))dlsym(mDlHandle, "perf_lock_acq");
+        if ((rc = dlerror()) != NULL) {
+            ALOGE("%s: failed to perf_lock_acq function handle", __func__);
+            goto cleanup;
+        }
+
+        perf_lock_rel = (int (*) (int))dlsym(mDlHandle, "perf_lock_rel");
+        if ((rc = dlerror()) != NULL) {
+            ALOGE("%s: failed to perf_lock_rel function handle", __func__);
+            goto cleanup;
+        }
+        CDBG("%s X", __func__);
+        return;
+
+cleanup:
+        perf_lock_acq  = NULL;
+        perf_lock_rel  = NULL;
+        mPerfLockEnable = 0;
+        if (mDlHandle) {
+            dlclose(mDlHandle);
+            mDlHandle = NULL;
+        }
+    }
+    CDBG("%s X", __func__);
+}
+
+/*===========================================================================
+ * FUNCTION   : lock_deinit
+ *
+ * DESCRIPTION: deinitialize the perf lock parameters
+ *
+ * PARAMETERS :
+ *   None
+ *
+ * RETURN     : void
+ *
+ *==========================================================================*/
+void QCameraPerfLock::lock_deinit()
+{
+    Mutex::Autolock lock(mLock);
+    if (mPerfLockEnable) {
+        CDBG("%s E", __func__);
+        if (mDlHandle) {
+            perf_lock_acq  = NULL;
+            perf_lock_rel  = NULL;
+
+            dlclose(mDlHandle);
+            mDlHandle       = NULL;
+        }
+        mPerfLockEnable = 0;
+        CDBG("%s X", __func__);
+    }
+}
+
+/*===========================================================================
+ * FUNCTION   : isTimerReset
+ *
+ * DESCRIPTION: Check if the timeout duration has been reached
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : true if timeout reached
+ *              false if timeout not reached
+ *
+ *==========================================================================*/
+bool QCameraPerfLock::isTimerReset()
+{
+    Mutex::Autolock lock(mLock);
+    if (mPerfLockEnable && mTimerSet) {
+        nsecs_t timeDiff = systemTime() - mStartTimeofLock;
+        if (ns2ms(timeDiff) > (uint32_t)mPerfLockTimeout) {
+            mTimerSet = 0;
+            return true;
+        }
+    }
+    return false;
+}
+
+/*===========================================================================
+ * FUNCTION   : startTimer
+ *
+ * DESCRIPTION: record the lock start time and arm the timeout timer
+ *
+ * PARAMETERS :
+ *  @timer_val: timer duration in milliseconds
+ *
+ * RETURN     : void
+ *
+ *==========================================================================*/
+void QCameraPerfLock::startTimer(uint32_t timer_val)
+{
+    mStartTimeofLock = systemTime();
+    mTimerSet = 1;
+    mPerfLockTimeout = timer_val;
+}
+
+/*===========================================================================
+ * FUNCTION   : lock_acq_timed
+ *
+ * DESCRIPTION: Acquire the performance lock for the specified duration.
+ *              If an existing lock timeout has not elapsed, extend the
+ *              lock further for the specified duration
+ *
+ * PARAMETERS :
+ *  @timer_val: lock duration
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ *==========================================================================*/
+int32_t QCameraPerfLock::lock_acq_timed(int32_t timer_val)
+{
+    int32_t ret = -1;
+
+    CDBG("%s E", __func__);
+    Mutex::Autolock lock(mLock);
+
+    if (mPerfLockEnable) {
+        int32_t perf_lock_params[] = {
+                ALL_CPUS_PWR_CLPS_DIS,
+                CPU0_MIN_FREQ_TURBO_MAX,
+                CPU4_MIN_FREQ_TURBO_MAX
+        };
+        if (mTimerSet) {
+            nsecs_t curElapsedTime = systemTime() - mStartTimeofLock;
+            int32_t pendingTimeout = mPerfLockTimeout - ns2ms(curElapsedTime);
+            timer_val += pendingTimeout;
+        }
+        startTimer(timer_val);
+
+        // Disable power hint when acquiring the perf lock
+        if (mCurrentPowerHintEnable) {
+            CDBG_HIGH("%s mCurrentPowerHintEnable %d", __func__ ,mCurrentPowerHintEnable);
+            powerHintInternal(mCurrentPowerHint, 0);
+        }
+
+        if ((NULL != perf_lock_acq) && (mPerfLockHandleTimed < 0)) {
+            ret = (*perf_lock_acq)(mPerfLockHandleTimed, timer_val, perf_lock_params,
+                    sizeof(perf_lock_params) / sizeof(int32_t));
+            CDBG("%s ret %d", __func__, ret);
+            if (ret < 0) {
+                ALOGE("%s: failed to acquire lock", __func__);
+            } else {
+                mPerfLockHandleTimed = ret;
+            }
+        }
+        CDBG("%s perf_handle_acq %d ",__func__, mPerfLockHandleTimed);
+    }
+
+    CDBG("%s X", __func__);
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : lock_acq
+ *
+ * DESCRIPTION: acquire the performance lock
+ *
+ * PARAMETERS :
+ *   None
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ *==========================================================================*/
+int32_t QCameraPerfLock::lock_acq()
+{
+    int32_t ret = -1;
+
+    CDBG("%s E", __func__);
+    Mutex::Autolock lock(mLock);
+
+    if (mPerfLockEnable) {
+        int32_t perf_lock_params[] = {
+                ALL_CPUS_PWR_CLPS_DIS,
+                CPU0_MIN_FREQ_TURBO_MAX,
+                CPU4_MIN_FREQ_TURBO_MAX
+        };
+
+        // Disable power hint when acquiring the perf lock
+        if (mCurrentPowerHintEnable) {
+            powerHintInternal(mCurrentPowerHint, 0);
+        }
+
+        if ((NULL != perf_lock_acq) && (mPerfLockHandle < 0)) {
+            ret = (*perf_lock_acq)(mPerfLockHandle, ONE_SEC, perf_lock_params,
+                    sizeof(perf_lock_params) / sizeof(int32_t));
+            CDBG("%s ret %d", __func__, ret);
+            if (ret < 0) {
+                ALOGE("%s: failed to acquire lock", __func__);
+            } else {
+                mPerfLockHandle = ret;
+            }
+        }
+        CDBG("%s perf_handle_acq %d ",__func__, mPerfLockHandle);
+    }
+
+    CDBG("%s X", __func__);
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : lock_rel_timed
+ *
+ * DESCRIPTION: release the timed performance lock
+ *
+ * PARAMETERS :
+ *   None
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ *==========================================================================*/
+int32_t QCameraPerfLock::lock_rel_timed()
+{
+    int ret = -1;
+    Mutex::Autolock lock(mLock);
+    if (mPerfLockEnable) {
+        CDBG("%s E", __func__);
+        if (mPerfLockHandleTimed < 0) {
+            ALOGE("%s: mPerfLockHandle < 0,check if lock is acquired", __func__);
+            return ret;
+        }
+        CDBG("%s perf_handle_rel %d ",__func__, mPerfLockHandleTimed);
+
+        if ((NULL != perf_lock_rel) && (0 <= mPerfLockHandleTimed)) {
+            ret = (*perf_lock_rel)(mPerfLockHandleTimed);
+            if (ret < 0) {
+                ALOGE("%s: failed to release lock", __func__);
+            }
+            mPerfLockHandleTimed = -1;
+        }
+
+        if ((mCurrentPowerHintEnable == 1) && (mTimerSet == 0)) {
+            powerHintInternal(mCurrentPowerHint, mCurrentPowerHintEnable);
+        }
+        CDBG("%s X", __func__);
+    }
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : lock_rel
+ *
+ * DESCRIPTION: release the performance lock
+ *
+ * PARAMETERS :
+ *   None
+ *
+ * RETURN     : int32_t type of status
+ *              NO_ERROR  -- success
+ *              non-zero failure code
+ *
+ *==========================================================================*/
+int32_t QCameraPerfLock::lock_rel()
+{
+    int ret = -1;
+    Mutex::Autolock lock(mLock);
+    if (mPerfLockEnable) {
+        CDBG("%s E", __func__);
+        if (mPerfLockHandle < 0) {
+            ALOGE("%s: mPerfLockHandle < 0,check if lock is acquired", __func__);
+            return ret;
+        }
+        CDBG("%s perf_handle_rel %d ",__func__, mPerfLockHandle);
+
+        if ((NULL != perf_lock_rel) && (0 <= mPerfLockHandle)) {
+            ret = (*perf_lock_rel)(mPerfLockHandle);
+            if (ret < 0) {
+                ALOGE("%s: failed to release lock", __func__);
+            }
+            mPerfLockHandle = -1;
+        }
+
+        if ((mCurrentPowerHintEnable == 1) && (mTimerSet == 0)) {
+            powerHintInternal(mCurrentPowerHint, mCurrentPowerHintEnable);
+        }
+        CDBG("%s X", __func__);
+    }
+    return ret;
+}
+
+/*===========================================================================
+ * FUNCTION   : powerHintInternal
+ *
+ * DESCRIPTION: Sets the requested power hint and state to power HAL.
+ *
+ * PARAMETERS :
+ * hint       : Power hint to apply
+ * enable     : Enable power hint if set to 1. Disable if set to 0.
+ * RETURN     : void
+ *
+ *==========================================================================*/
+void QCameraPerfLock::powerHintInternal(power_hint_t hint, uint32_t enable)
+{
+#ifdef HAS_MULTIMEDIA_HINTS
+    if (m_pPowerModule != NULL) {
+        if (enable == 1) {
+            m_pPowerModule->powerHint(m_pPowerModule, hint, (void *)"state=1");
+        } else {
+            m_pPowerModule->powerHint(m_pPowerModule, hint, (void *)"state=0");
+        }
+    }
+#endif
+}
+
+/*===========================================================================
+ * FUNCTION   : powerHint
+ *
+ * DESCRIPTION: Sets the requested power hint and state to power HAL.
+ *
+ * PARAMETERS :
+ * hint       : Power hint
+ * enable     : Enable power hint if set to 1. Disable if set to 0.
+ * RETURN     : void
+ *
+ *==========================================================================*/
+void QCameraPerfLock::powerHint(power_hint_t hint, uint32_t enable)
+{
+#ifdef HAS_MULTIMEDIA_HINTS
+    if (mCurrentPowerHintEnable) {
+        //disable previous hint
+        powerHintInternal(mCurrentPowerHint, 0);
+    }
+    powerHintInternal(hint, enable);
+
+    mCurrentPowerHint       = hint;
+    mCurrentPowerHintEnable = enable;
+#endif
+}
+
+}; // namespace qcamera
diff --git a/camera/QCamera2/util/QCameraPerf.h b/camera/QCamera2/util/QCameraPerf.h
new file mode 100644
index 0000000..ca9000c
--- /dev/null
+++ b/camera/QCamera2/util/QCameraPerf.h
@@ -0,0 +1,87 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCAMERAPERF_H__
+#define __QCAMERAPERF_H__
+
+#include <dlfcn.h>
+#include <utils/Mutex.h>
+#include <hardware/power.h>
+
+typedef enum {
+    ALL_CORES_ONLINE = 0x7FE,
+    ALL_CPUS_PWR_CLPS_DIS = 0x101,
+    CPU0_MIN_FREQ_TURBO_MAX = 0x2FE,
+    CPU4_MIN_FREQ_TURBO_MAX = 0x1FFE,
+}perf_lock_params_t;
+
+/* Time related macros */
+#define ONE_SEC 1000
+typedef int64_t nsecs_t;
+#define NSEC_PER_SEC 1000000000LLU
+
+using namespace android;
+
+namespace qcamera {
+
+class QCameraPerfLock {
+public:
+    QCameraPerfLock();
+    ~QCameraPerfLock();
+
+    void    lock_init();
+    void    lock_deinit();
+    int32_t lock_rel();
+    int32_t lock_acq();
+    int32_t lock_acq_timed(int32_t timer_val);
+    int32_t lock_rel_timed();
+    bool    isTimerReset();
+    void    powerHintInternal(power_hint_t hint, uint32_t enable);
+    void    powerHint(power_hint_t hint, uint32_t enable);
+
+private:
+    int32_t        (*perf_lock_acq)(int, int, int[], int);
+    int32_t        (*perf_lock_rel)(int);
+    void            startTimer(uint32_t timer_val);
+    void           *mDlHandle;
+    uint32_t        mPerfLockEnable;
+    Mutex           mLock;
+    int32_t         mPerfLockHandle;  // Performance lock library handle
+    int32_t         mPerfLockHandleTimed;  // Performance lock library handle
+    power_module_t *m_pPowerModule;   // power module Handle
+    power_hint_t    mCurrentPowerHint;
+    uint32_t        mCurrentPowerHintEnable;
+    uint32_t        mTimerSet;
+    uint32_t        mPerfLockTimeout;
+    nsecs_t         mStartTimeofLock;
+};
+
+}; // namespace qcamera
+
+#endif /* __QCAMERAPERF_H__ */
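
QCameraPerfLock wraps the vendor perf-lock library (resolved at runtime from ro.vendor.extension_library) plus the power HAL hint path. A minimal usage sketch, assuming a simplified open/record flow that is not part of this patch:

    // Illustrative sketch only -- not part of the patch above.
    #include "QCameraPerf.h"

    using namespace qcamera;

    void perf_lock_sketch()
    {
        QCameraPerfLock perfLock;
        perfLock.lock_init();             // dlopen()s the perf library, reads persist.camera.perflock.enable

        // Boost CPUs (ONE_SEC / turbo params above) while bringing the session up.
        perfLock.lock_acq();
        /* ... open camera, configure streams ... */
        perfLock.lock_rel();

        // Or hold the boost for a bounded window, here 2000 ms.
        perfLock.lock_acq_timed(2000);

        // Power HAL hint while encoding; POWER_HINT_VIDEO_ENCODE comes from
        // hardware/power.h and is only honored when HAS_MULTIMEDIA_HINTS is set.
        perfLock.powerHint(POWER_HINT_VIDEO_ENCODE, 1);
        /* ... recording ... */
        perfLock.powerHint(POWER_HINT_VIDEO_ENCODE, 0);

        perfLock.lock_rel_timed();
        perfLock.lock_deinit();           // resets function pointers and dlclose()s the library
    }
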
diff --git a/camera/QCamera2/util/QCameraQueue.cpp b/camera/QCamera2/util/QCameraQueue.cpp
new file mode 100644
index 0000000..b7b6f46
--- /dev/null
+++ b/camera/QCamera2/util/QCameraQueue.cpp
@@ -0,0 +1,412 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*     * Redistributions of source code must retain the above copyright
+*       notice, this list of conditions and the following disclaimer.
+*     * Redistributions in binary form must reproduce the above
+*       copyright notice, this list of conditions and the following
+*       disclaimer in the documentation and/or other materials provided
+*       with the distribution.
+*     * Neither the name of The Linux Foundation nor the names of its
+*       contributors may be used to endorse or promote products derived
+*       from this software without specific prior written permission.
+*
+* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+* ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*
+*/
+
+#include <utils/Errors.h>
+#include <utils/Log.h>
+#include "QCameraQueue.h"
+
+namespace qcamera {
+
+/*===========================================================================
+ * FUNCTION   : QCameraQueue
+ *
+ * DESCRIPTION: default constructor of QCameraQueue
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraQueue::QCameraQueue()
+{
+    pthread_mutex_init(&m_lock, NULL);
+    cam_list_init(&m_head.list);
+    m_size = 0;
+    m_dataFn = NULL;
+    m_userData = NULL;
+    m_active = true;
+}
+
+/*===========================================================================
+ * FUNCTION   : QCameraQueue
+ *
+ * DESCRIPTION: constructor of QCameraQueue
+ *
+ * PARAMETERS :
+ *   @data_rel_fn : function ptr to release node data internal resource
+ *   @user_data   : user data ptr
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraQueue::QCameraQueue(release_data_fn data_rel_fn, void *user_data)
+{
+    pthread_mutex_init(&m_lock, NULL);
+    cam_list_init(&m_head.list);
+    m_size = 0;
+    m_dataFn = data_rel_fn;
+    m_userData = user_data;
+    m_active = true;
+}
+
+/*===========================================================================
+ * FUNCTION   : ~QCameraQueue
+ *
+ * DESCRIPTION: destructor of QCameraQueue
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+QCameraQueue::~QCameraQueue()
+{
+    flush();
+    pthread_mutex_destroy(&m_lock);
+}
+
+/*===========================================================================
+ * FUNCTION   : init
+ *
+ * DESCRIPTION: Put the queue to active state (ready to enqueue and dequeue)
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCameraQueue::init()
+{
+    pthread_mutex_lock(&m_lock);
+    m_active = true;
+    pthread_mutex_unlock(&m_lock);
+}
+
+/*===========================================================================
+ * FUNCTION   : isEmpty
+ *
+ * DESCRIPTION: return whether the queue is empty
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : true -- queue is empty; false -- not empty
+ *==========================================================================*/
+bool QCameraQueue::isEmpty()
+{
+    bool flag = true;
+    pthread_mutex_lock(&m_lock);
+    if (m_size > 0) {
+        flag = false;
+    }
+    pthread_mutex_unlock(&m_lock);
+    return flag;
+}
+
+/*===========================================================================
+ * FUNCTION   : enqueue
+ *
+ * DESCRIPTION: enqueue data into the queue
+ *
+ * PARAMETERS :
+ *   @data    : data to be enqueued
+ *
+ * RETURN     : true -- success; false -- failed
+ *==========================================================================*/
+bool QCameraQueue::enqueue(void *data)
+{
+    bool rc;
+    camera_q_node *node =
+        (camera_q_node *)malloc(sizeof(camera_q_node));
+    if (NULL == node) {
+        ALOGE("%s: No memory for camera_q_node", __func__);
+        return false;
+    }
+
+    memset(node, 0, sizeof(camera_q_node));
+    node->data = data;
+
+    pthread_mutex_lock(&m_lock);
+    if (m_active) {
+        cam_list_add_tail_node(&node->list, &m_head.list);
+        m_size++;
+        rc = true;
+    } else {
+        free(node);
+        rc = false;
+    }
+    pthread_mutex_unlock(&m_lock);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : enqueueWithPriority
+ *
+ * DESCRIPTION: enqueue data with priority; the node is inserted at the
+ *              head of the queue
+ *
+ * PARAMETERS :
+ *   @data    : data to be enqueued
+ *
+ * RETURN     : true -- success; false -- failed
+ *==========================================================================*/
+bool QCameraQueue::enqueueWithPriority(void *data)
+{
+    bool rc;
+    camera_q_node *node =
+        (camera_q_node *)malloc(sizeof(camera_q_node));
+    if (NULL == node) {
+        ALOGE("%s: No memory for camera_q_node", __func__);
+        return false;
+    }
+
+    memset(node, 0, sizeof(camera_q_node));
+    node->data = data;
+
+    pthread_mutex_lock(&m_lock);
+    if (m_active) {
+        struct cam_list *p_next = m_head.list.next;
+
+        m_head.list.next = &node->list;
+        p_next->prev = &node->list;
+        node->list.next = p_next;
+        node->list.prev = &m_head.list;
+
+        m_size++;
+        rc = true;
+    } else {
+        free(node);
+        rc = false;
+    }
+    pthread_mutex_unlock(&m_lock);
+    return rc;
+}
+
+/*===========================================================================
+ * FUNCTION   : peek
+ *
+ * DESCRIPTION: return the head element without removing it
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : data ptr, or NULL if the queue is empty or inactive.
+ *==========================================================================*/
+void* QCameraQueue::peek()
+{
+    camera_q_node* node = NULL;
+    void* data = NULL;
+    struct cam_list *head = NULL;
+    struct cam_list *pos = NULL;
+
+    pthread_mutex_lock(&m_lock);
+    if (m_active) {
+        head = &m_head.list;
+        pos = head->next;
+        if (pos != head) {
+            node = member_of(pos, camera_q_node, list);
+        }
+    }
+    pthread_mutex_unlock(&m_lock);
+
+    if (NULL != node) {
+        data = node->data;
+    }
+
+    return data;
+}
+
+/*===========================================================================
+ * FUNCTION   : dequeue
+ *
+ * DESCRIPTION: dequeue data from the queue
+ *
+ * PARAMETERS :
+ *   @bFromHead : if true, dequeue from the head
+ *                if false, dequeue from the tail
+ *
+ * RETURN     : data ptr, or NULL if the queue is empty or inactive.
+ *==========================================================================*/
+void* QCameraQueue::dequeue(bool bFromHead)
+{
+    camera_q_node* node = NULL;
+    void* data = NULL;
+    struct cam_list *head = NULL;
+    struct cam_list *pos = NULL;
+
+    pthread_mutex_lock(&m_lock);
+    if (m_active) {
+        head = &m_head.list;
+        if (bFromHead) {
+            pos = head->next;
+        } else {
+            pos = head->prev;
+        }
+        if (pos != head) {
+            node = member_of(pos, camera_q_node, list);
+            cam_list_del_node(&node->list);
+            m_size--;
+        }
+    }
+    pthread_mutex_unlock(&m_lock);
+
+    if (NULL != node) {
+        data = node->data;
+        free(node);
+    }
+
+    return data;
+}
+
+/*===========================================================================
+ * FUNCTION   : flush
+ *
+ * DESCRIPTION: flush all nodes from the queue; the queue is left empty and
+ *              inactive afterwards (call init() before reusing it).
+ *
+ * PARAMETERS : None
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCameraQueue::flush()
+{
+    camera_q_node* node = NULL;
+    struct cam_list *head = NULL;
+    struct cam_list *pos = NULL;
+
+    pthread_mutex_lock(&m_lock);
+    if (m_active) {
+        head = &m_head.list;
+        pos = head->next;
+
+        while(pos != head) {
+            node = member_of(pos, camera_q_node, list);
+            pos = pos->next;
+            cam_list_del_node(&node->list);
+            m_size--;
+
+            if (NULL != node->data) {
+                if (m_dataFn) {
+                    m_dataFn(node->data, m_userData);
+                }
+                free(node->data);
+            }
+            free(node);
+
+        }
+        m_size = 0;
+        m_active = false;
+    }
+    pthread_mutex_unlock(&m_lock);
+}
+
+/*===========================================================================
+ * FUNCTION   : flushNodes
+ *
+ * DESCRIPTION: flush only specific nodes, depending on
+ *              the given matching function.
+ *
+ * PARAMETERS :
+ *   @match   : matching function
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCameraQueue::flushNodes(match_fn match)
+{
+    camera_q_node* node = NULL;
+    struct cam_list *head = NULL;
+    struct cam_list *pos = NULL;
+
+    if ( NULL == match ) {
+        return;
+    }
+
+    pthread_mutex_lock(&m_lock);
+    if (m_active) {
+        head = &m_head.list;
+        pos = head->next;
+
+        while(pos != head) {
+            node = member_of(pos, camera_q_node, list);
+            pos = pos->next;
+            if ( match(node->data, m_userData) ) {
+                cam_list_del_node(&node->list);
+                m_size--;
+
+                if (NULL != node->data) {
+                    if (m_dataFn) {
+                        m_dataFn(node->data, m_userData);
+                    }
+                    free(node->data);
+                }
+                free(node);
+            }
+        }
+    }
+    pthread_mutex_unlock(&m_lock);
+}
+
+/*===========================================================================
+ * FUNCTION   : flushNodes
+ *
+ * DESCRIPTION: flush only specific nodes, depending on
+ *              the given matching function.
+ *
+ * PARAMETERS :
+ *   @match      : matching function
+ *   @match_data : caller-provided data passed to the matching function
+ *
+ * RETURN     : None
+ *==========================================================================*/
+void QCameraQueue::flushNodes(match_fn_data match, void *match_data)
+{
+    camera_q_node* node = NULL;
+    struct cam_list *head = NULL;
+    struct cam_list *pos = NULL;
+
+    if ( NULL == match ) {
+        return;
+    }
+
+    pthread_mutex_lock(&m_lock);
+    if (m_active) {
+        head = &m_head.list;
+        pos = head->next;
+
+        while(pos != head) {
+            node = member_of(pos, camera_q_node, list);
+            pos = pos->next;
+            if ( match(node->data, m_userData, match_data) ) {
+                cam_list_del_node(&node->list);
+                m_size--;
+
+                if (NULL != node->data) {
+                    if (m_dataFn) {
+                        m_dataFn(node->data, m_userData);
+                    }
+                    free(node->data);
+                }
+                free(node);
+            }
+        }
+    }
+    pthread_mutex_unlock(&m_lock);
+}
+
+}; // namespace qcamera
diff --git a/camera/QCamera2/util/QCameraQueue.h b/camera/QCamera2/util/QCameraQueue.h
new file mode 100755
index 0000000..774ab60
--- /dev/null
+++ b/camera/QCamera2/util/QCameraQueue.h
@@ -0,0 +1,75 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCAMERA_QUEUE_H__
+#define __QCAMERA_QUEUE_H__
+
+#include <pthread.h>
+#include "cam_list.h"
+
+namespace qcamera {
+
+typedef bool (*match_fn_data)(void *data, void *user_data, void *match_data);
+typedef void (*release_data_fn)(void* data, void *user_data);
+typedef bool (*match_fn)(void *data, void *user_data);
+
+class QCameraQueue {
+public:
+    QCameraQueue();
+    QCameraQueue(release_data_fn data_rel_fn, void *user_data);
+    virtual ~QCameraQueue();
+    void init();
+    bool enqueue(void *data);
+    bool enqueueWithPriority(void *data);
+    /* This call will put queue into uninitialized state.
+     * Need to call init() in order to use the queue again */
+    void flush();
+    void flushNodes(match_fn match);
+    void flushNodes(match_fn_data match, void *spec_data);
+    void* dequeue(bool bFromHead = true);
+    void* peek();
+    bool isEmpty();
+    int getCurrentSize() {return m_size;}
+private:
+    typedef struct {
+        struct cam_list list;
+        void* data;
+    } camera_q_node;
+
+    camera_q_node m_head; // dummy head
+    int m_size;
+    bool m_active;
+    pthread_mutex_t m_lock;
+    release_data_fn m_dataFn;
+    void * m_userData;
+};
+
+}; // namespace qcamera
+
+#endif /* __QCAMERA_QUEUE_H__ */
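
QCameraQueue is the thread-safe FIFO the HAL uses to hand frames and commands between threads. A small self-contained sketch of the intended pattern (the payload type and function names are illustrative, not from the patch):

    // Illustrative sketch only -- not part of the patch above.
    #include <cstdlib>
    #include "QCameraQueue.h"

    using namespace qcamera;

    // Optional release hook: called by flush()/flushNodes() before the queue
    // free()s a node's payload; nothing extra to tear down for a plain buffer.
    static void release_payload(void * /*data*/, void * /*user_data*/)
    {
    }

    void queue_sketch()
    {
        QCameraQueue q(release_payload, /*user_data*/ NULL);

        // Producer: payloads must be heap-allocated, since flush() free()s them.
        int *job = (int *)malloc(sizeof(int));
        if (job == NULL) return;
        *job = 42;
        q.enqueue(job);                 // or enqueueWithPriority(job) to jump the line

        // Consumer: dequeue() hands ownership of the payload back to the caller.
        int *done = (int *)q.dequeue();
        if (done != NULL) {
            /* ... process *done ... */
            free(done);
        }

        q.flush();                      // empties and deactivates; init() re-arms the queue
    }
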
diff --git a/camera/QCameraParameters.h b/camera/QCameraParameters.h
new file mode 100644
index 0000000..dd29dda
--- /dev/null
+++ b/camera/QCameraParameters.h
@@ -0,0 +1,257 @@
+/*
+**
+** Copyright 2008, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+#ifndef ANDROID_HARDWARE_QCAMERA_PARAMETERS_H
+#define ANDROID_HARDWARE_QCAMERA_PARAMETERS_H
+
+//#include <utils/KeyedVector.h>
+//#include <utils/String8.h>
+#include <camera/CameraParameters.h>
+
+namespace android {
+
+struct FPSRange{
+    int minFPS;
+    int maxFPS;
+    FPSRange(){
+        minFPS=0;
+        maxFPS=0;
+    };
+    FPSRange(int min,int max){
+        minFPS=min;
+        maxFPS=max;
+    };
+};
+class QCameraParameters: public CameraParameters
+{
+public:
+#if 1
+    QCameraParameters() : CameraParameters() {};
+    QCameraParameters(const String8 &params): CameraParameters(params) {};
+#else
+    QCameraParameters() : CameraParameters() {};
+    QCameraParameters(const String8 &params) { unflatten(params); }
+#endif
+    ~QCameraParameters();
+
+    // Supported PREVIEW/RECORDING SIZES IN HIGH FRAME RATE recording, sizes in pixels.
+    // Example value: "800x480,432x320". Read only.
+    static const char KEY_SUPPORTED_HFR_SIZES[];
+    // The mode of preview frame rate.
+    // Example value: "frame-rate-auto, frame-rate-fixed".
+    static const char KEY_PREVIEW_FRAME_RATE_MODE[];
+    static const char KEY_SUPPORTED_PREVIEW_FRAME_RATE_MODES[];
+    static const char KEY_PREVIEW_FRAME_RATE_AUTO_MODE[];
+    static const char KEY_PREVIEW_FRAME_RATE_FIXED_MODE[];
+
+    static const char KEY_SKIN_TONE_ENHANCEMENT[] ;
+    static const char KEY_SUPPORTED_SKIN_TONE_ENHANCEMENT_MODES[] ;
+
+    //Touch Af/AEC settings.
+    static const char KEY_TOUCH_AF_AEC[];
+    static const char KEY_SUPPORTED_TOUCH_AF_AEC[];
+    //Touch Index for AEC.
+    static const char KEY_TOUCH_INDEX_AEC[];
+    //Touch Index for AF.
+    static const char KEY_TOUCH_INDEX_AF[];
+    // Current auto scene detection mode.
+    // Example value: "off" or SCENE_DETECT_XXX constants. Read/write.
+    static const char KEY_SCENE_DETECT[];
+    // Supported auto scene detection settings.
+    // Example value: "off,backlight,snow/cloudy". Read only.
+    static const char KEY_SUPPORTED_SCENE_DETECT[];
+    // Returns true if video snapshot is supported, i.e. applications can
+    // take a picture while video recording is in progress.
+    static const char KEY_FULL_VIDEO_SNAP_SUPPORTED[];
+    static const char KEY_POWER_MODE_SUPPORTED[];
+
+    static const char KEY_ISO_MODE[];
+    static const char KEY_SUPPORTED_ISO_MODES[];
+    static const char KEY_LENSSHADE[] ;
+    static const char KEY_SUPPORTED_LENSSHADE_MODES[] ;
+
+    static const char KEY_AUTO_EXPOSURE[];
+    static const char KEY_SUPPORTED_AUTO_EXPOSURE[];
+
+    static const char KEY_GPS_LATITUDE_REF[];
+    static const char KEY_GPS_LONGITUDE_REF[];
+    static const char KEY_GPS_ALTITUDE_REF[];
+    static const char KEY_GPS_STATUS[];
+    static const char KEY_EXIF_DATETIME[];
+    static const char KEY_MEMORY_COLOR_ENHANCEMENT[];
+    static const char KEY_SUPPORTED_MEM_COLOR_ENHANCE_MODES[];
+
+
+    static const char KEY_POWER_MODE[];
+
+    static const char KEY_ZSL[];
+    static const char KEY_SUPPORTED_ZSL_MODES[];
+
+    static const char KEY_CAMERA_MODE[];
+
+    static const char KEY_VIDEO_HIGH_FRAME_RATE[];
+    static const char KEY_SUPPORTED_VIDEO_HIGH_FRAME_RATE_MODES[];
+    static const char KEY_HIGH_DYNAMIC_RANGE_IMAGING[];
+    static const char KEY_SUPPORTED_HDR_IMAGING_MODES[];
+    static const char KEY_AE_BRACKET_HDR[];
+
+
+    // DENOISE
+    static const char KEY_DENOISE[];
+    static const char KEY_SUPPORTED_DENOISE[];
+
+    //Selectable zone AF.
+    static const char KEY_SELECTABLE_ZONE_AF[];
+    static const char KEY_SUPPORTED_SELECTABLE_ZONE_AF[];
+
+    //Face Detection
+    static const char KEY_FACE_DETECTION[];
+    static const char KEY_SUPPORTED_FACE_DETECTION[];
+
+    //Redeye Reduction
+    static const char KEY_REDEYE_REDUCTION[];
+    static const char KEY_SUPPORTED_REDEYE_REDUCTION[];
+    static const char EFFECT_EMBOSS[];
+    static const char EFFECT_SKETCH[];
+    static const char EFFECT_NEON[];
+
+    // Values for Touch AF/AEC
+    static const char TOUCH_AF_AEC_OFF[] ;
+    static const char TOUCH_AF_AEC_ON[] ;
+    static const char SCENE_MODE_ASD[];
+    static const char SCENE_MODE_BACKLIGHT[];
+    static const char SCENE_MODE_FLOWERS[];
+    static const char SCENE_MODE_AR[];
+    static const char SCENE_MODE_HDR[];
+    static const char SCENE_DETECT_OFF[];
+    static const char SCENE_DETECT_ON[];
+    static const char PIXEL_FORMAT_YUV420SP_ADRENO[]; // ADRENO
+    static const char PIXEL_FORMAT_RAW[];
+    static const char PIXEL_FORMAT_YV12[];
+    static const char PIXEL_FORMAT_NV12[]; // NV12
+    // Normal focus mode. Applications should call
+    // CameraHardwareInterface.autoFocus to start the focus in this mode.
+    static const char FOCUS_MODE_NORMAL[];
+    static const char ISO_AUTO[];
+    static const char ISO_HJR[] ;
+    static const char ISO_100[];
+    static const char ISO_200[] ;
+    static const char ISO_400[];
+    static const char ISO_800[];
+    static const char ISO_1600[];
+    // Values for Lens Shading
+    static const char LENSSHADE_ENABLE[] ;
+    static const char LENSSHADE_DISABLE[] ;
+
+    // Values for auto exposure settings.
+    static const char AUTO_EXPOSURE_FRAME_AVG[];
+    static const char AUTO_EXPOSURE_CENTER_WEIGHTED[];
+    static const char AUTO_EXPOSURE_SPOT_METERING[];
+
+    static const char KEY_SHARPNESS[];
+    static const char KEY_MAX_SHARPNESS[];
+    static const char KEY_CONTRAST[];
+    static const char KEY_MAX_CONTRAST[];
+    static const char KEY_SATURATION[];
+    static const char KEY_MAX_SATURATION[];
+
+    static const char KEY_HISTOGRAM[] ;
+    static const char KEY_SUPPORTED_HISTOGRAM_MODES[] ;
+    // Values for HISTOGRAM
+    static const char HISTOGRAM_ENABLE[] ;
+    static const char HISTOGRAM_DISABLE[] ;
+
+    // Values for SKIN TONE ENHANCEMENT
+    static const char SKIN_TONE_ENHANCEMENT_ENABLE[] ;
+    static const char SKIN_TONE_ENHANCEMENT_DISABLE[] ;
+
+    // Values for Denoise
+    static const char DENOISE_OFF[] ;
+    static const char DENOISE_ON[] ;
+
+    // Values for auto exposure settings.
+    static const char SELECTABLE_ZONE_AF_AUTO[];
+    static const char SELECTABLE_ZONE_AF_SPOT_METERING[];
+    static const char SELECTABLE_ZONE_AF_CENTER_WEIGHTED[];
+    static const char SELECTABLE_ZONE_AF_FRAME_AVERAGE[];
+
+    // Values for Face Detection settings.
+    static const char FACE_DETECTION_OFF[];
+    static const char FACE_DETECTION_ON[];
+
+    // Values for MCE settings.
+    static const char MCE_ENABLE[];
+    static const char MCE_DISABLE[];
+
+    // Values for ZSL settings.
+    static const char ZSL_OFF[];
+    static const char ZSL_ON[];
+
+    // Values for HDR Bracketing settings.
+    static const char AE_BRACKET_HDR_OFF[];
+    static const char AE_BRACKET_HDR[];
+    static const char AE_BRACKET[];
+
+    // Values for Power mode settings.
+    static const char LOW_POWER[];
+    static const char NORMAL_POWER[];
+
+    // Values for HFR settings.
+    static const char VIDEO_HFR_OFF[];
+    static const char VIDEO_HFR_2X[];
+    static const char VIDEO_HFR_3X[];
+    static const char VIDEO_HFR_4X[];
+
+    // Values for Redeye Reduction settings.
+    static const char REDEYE_REDUCTION_ENABLE[];
+    static const char REDEYE_REDUCTION_DISABLE[];
+    // Values for HDR settings.
+    static const char HDR_ENABLE[];
+    static const char HDR_DISABLE[];
+
+   // Values for Redeye Reduction settings.
+   // static const char REDEYE_REDUCTION_ENABLE[];
+   // static const char REDEYE_REDUCTION_DISABLE[];
+   // Values for HDR settings.
+   //    static const char HDR_ENABLE[];
+   //    static const char HDR_DISABLE[];
+
+
+    static const char KEY_SINGLE_ISP_OUTPUT_ENABLED[];
+    static const char KEY_SUPPORTED_CAMERA_FEATURES[];
+    static const char KEY_MAX_NUM_REQUESTED_FACES[];
+
+    enum {
+        CAMERA_ORIENTATION_UNKNOWN = 0,
+        CAMERA_ORIENTATION_PORTRAIT = 1,
+        CAMERA_ORIENTATION_LANDSCAPE = 2,
+    };
+    int getOrientation() const;
+    void setOrientation(int orientation);
+    void getSupportedHfrSizes(Vector<Size> &sizes) const;
+    void setPreviewFpsRange(int minFPS, int maxFPS);
+    void setPreviewFrameRateMode(const char *mode);
+    const char *getPreviewFrameRateMode() const;
+    void setTouchIndexAec(int x, int y);
+    void getTouchIndexAec(int *x, int *y) const;
+    void setTouchIndexAf(int x, int y);
+    void getTouchIndexAf(int *x, int *y) const;
+    void getMeteringAreaCenter(int * x, int *y) const;
+
+};
+
+}; // namespace android
+
+#endif
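
QCameraParameters adds QC-specific keys on top of the stock android::CameraParameters string map, so it is set and queried the same way as the base class. A hedged sketch (the set()/get() calls are the base-class CameraParameters API; the surrounding function is illustrative):

    // Illustrative sketch only -- not part of the patch above.
    #include "QCameraParameters.h"

    using namespace android;

    void parameters_sketch(QCameraParameters &params)
    {
        // QC extension keys ride on the inherited string map.
        params.set(QCameraParameters::KEY_ZSL, QCameraParameters::ZSL_ON);
        params.set(QCameraParameters::KEY_ISO_MODE, QCameraParameters::ISO_AUTO);
        params.set(QCameraParameters::KEY_FACE_DETECTION,
                QCameraParameters::FACE_DETECTION_ON);

        // Capability keys are published by the HAL and read back here.
        const char *hfrSizes =
                params.get(QCameraParameters::KEY_SUPPORTED_HFR_SIZES);
        if (hfrSizes != NULL) {
            /* parse a "800x480,432x320"-style list */
        }
    }
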
diff --git a/camera/QCamera_Intf.h b/camera/QCamera_Intf.h
new file mode 100755
index 0000000..23faa55
--- /dev/null
+++ b/camera/QCamera_Intf.h
@@ -0,0 +1,1147 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above
+ *       copyright notice, this list of conditions and the following
+ *       disclaimer in the documentation and/or other materials provided
+ *       with the distribution.
+ *     * Neither the name of The Linux Foundation nor the names of its
+ *       contributors may be used to endorse or promote products derived
+ *       from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef __QCAMERA_INTF_H__
+#define __QCAMERA_INTF_H__
+
+#include <stdint.h>
+#include <pthread.h>
+#include <inttypes.h>
+
+#define PAD_TO_WORD(a)               (((a)+3)&~3)
+#define PAD_TO_2K(a)                 (((a)+2047)&~2047)
+#define PAD_TO_4K(a)                 (((a)+4095)&~4095)
+#define PAD_TO_8K(a)                 (((a)+8191)&~8191)
+
+#define CEILING32(X) (((X) + 0x0001F) & 0xFFFFFFE0)
+#define CEILING16(X) (((X) + 0x000F) & 0xFFF0)
+#define CEILING4(X)  (((X) + 0x0003) & 0xFFFC)
+#define CEILING2(X)  (((X) + 0x0001) & 0xFFFE)
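+
+/*
+ * Illustrative sketch (not part of the original header): these macros round a
+ * value up to the next alignment boundary by adding (boundary - 1) and masking
+ * off the low bits, e.g.
+ *
+ *   PAD_TO_2K(100)   -> 2048   // 100 rounded up to a 2K boundary
+ *   PAD_TO_4K(4096)  -> 4096   // already aligned, unchanged
+ *   CEILING16(30)    -> 32     // next multiple of 16
+ */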
+
+#define MAX_ROI 2
+#define MAX_NUM_PARM 5
+#define MAX_NUM_OPS 2
+#define VIDEO_MAX_PLANES 8
+#define MAX_SNAPSHOT_BUFFERS 5
+#define MAX_EXP_BRACKETING_LENGTH 32
+
+
+/* Exif Tag ID */
+typedef uint32_t exif_tag_id_t;
+
+/* Exif Info (opaque definition) */
+struct exif_info_t;
+typedef struct exif_info_t * exif_info_obj_t;
+
+typedef enum {
+  BACK_CAMERA,
+  FRONT_CAMERA,
+}cam_position_t;
+
+typedef enum {
+  CAM_CTRL_FAILED,        /* Failure in doing operation */
+  CAM_CTRL_SUCCESS,       /* Operation succeeded */
+  CAM_CTRL_INVALID_PARM,  /* Invalid parameter provided */
+  CAM_CTRL_NOT_SUPPORTED, /* Parameter/operation not supported */
+  CAM_CTRL_ACCEPTED,      /* Parameter accepted */
+  CAM_CTRL_MAX,
+} cam_ctrl_status_t;
+
+typedef enum {
+  CAMERA_YUV_420_NV12,
+  CAMERA_YUV_420_NV21,
+  CAMERA_YUV_420_NV21_ADRENO,
+  CAMERA_BAYER_SBGGR10,
+  CAMERA_RDI,
+  CAMERA_YUV_420_YV12,
+  CAMERA_YUV_422_NV16,
+  CAMERA_YUV_422_NV61,
+  CAMERA_YUV_422_YUYV,
+  CAMERA_SAEC,
+  CAMERA_SAWB,
+  CAMERA_SAFC,
+  CAMERA_SHST,
+} cam_format_t;
+
+typedef enum {
+  CAMERA_PAD_NONE,
+  CAMERA_PAD_TO_WORD,   /*2 bytes*/
+  CAMERA_PAD_TO_LONG_WORD, /*4 bytes*/
+  CAMERA_PAD_TO_8, /*8 bytes*/
+  CAMERA_PAD_TO_16, /*16 bytes*/
+
+  CAMERA_PAD_TO_1K, /*1k bytes*/
+  CAMERA_PAD_TO_2K, /*2k bytes*/
+  CAMERA_PAD_TO_4K,
+  CAMERA_PAD_TO_8K
+} cam_pad_format_t;
+
+typedef struct {
+  int ext_mode;   /* preview, main, thumbnail, video, raw, etc */
+  int frame_idx;  /* frame index */
+  int fd;         /* origin fd */
+  uint32_t size;
+  uint8_t is_hist; /* is hist mapping? */
+} mm_camera_frame_map_type;
+
+typedef struct {
+  int ext_mode;   /* preview, main, thumbnail, video, raw, etc */
+  int frame_idx;  /* frame index */
+  uint8_t is_hist; /* is hist unmapping? */
+} mm_camera_frame_unmap_type;
+
+typedef enum {
+  CAM_SOCK_MSG_TYPE_FD_MAPPING,
+  CAM_SOCK_MSG_TYPE_FD_UNMAPPING,
+  CAM_SOCK_MSG_TYPE_WDN_START,
+  CAM_SOCK_MSG_TYPE_HDR_START,
+  CAM_SOCK_MSG_TYPE_HIST_MAPPING,
+  CAM_SOCK_MSG_TYPE_HIST_UNMAPPING,
+  CAM_SOCK_MSG_TYPE_MAX
+}mm_camera_socket_msg_type;
+#define MAX_HDR_EXP_FRAME_NUM 5
+typedef struct {
+  unsigned long cookie;
+  int num_hdr_frames;
+  int hdr_main_idx[MAX_HDR_EXP_FRAME_NUM];
+  int hdr_thm_idx[MAX_HDR_EXP_FRAME_NUM];
+  int exp[MAX_HDR_EXP_FRAME_NUM];
+} mm_camera_hdr_start_type;
+
+#define MM_MAX_WDN_NUM 2
+typedef struct {
+  unsigned long cookie;
+  int num_frames;
+  int ext_mode[MM_MAX_WDN_NUM];
+  int frame_idx[MM_MAX_WDN_NUM];
+} mm_camera_wdn_start_type;
+
+typedef struct {
+  mm_camera_socket_msg_type msg_type;
+  union {
+    mm_camera_frame_map_type frame_fd_map;
+    mm_camera_frame_unmap_type frame_fd_unmap;
+    mm_camera_wdn_start_type wdn_start;
+    mm_camera_hdr_start_type hdr_pkg;
+  } payload;
+} cam_sock_packet_t;
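+
+/*
+ * Illustrative sketch (not part of the original header), assuming a stream
+ * buffer is being mapped to the daemon over the camera socket; buf_fd and
+ * buf_size are hypothetical values describing the buffer:
+ *
+ *   cam_sock_packet_t pkt;
+ *   memset(&pkt, 0, sizeof(pkt));
+ *   pkt.msg_type = CAM_SOCK_MSG_TYPE_FD_MAPPING;
+ *   pkt.payload.frame_fd_map.ext_mode  = 0;        // stream's ext mode
+ *   pkt.payload.frame_fd_map.frame_idx = 0;        // buffer index within the stream
+ *   pkt.payload.frame_fd_map.fd        = buf_fd;   // fd backing the buffer
+ *   pkt.payload.frame_fd_map.size      = buf_size; // buffer length in bytes
+ *   pkt.payload.frame_fd_map.is_hist   = 0;        // not a histogram buffer
+ *   // the packet (with the fd passed as ancillary data) is then sent to the daemon
+ */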
+
+typedef enum {
+  CAM_VIDEO_FRAME,
+  CAM_SNAPSHOT_FRAME,
+  CAM_PREVIEW_FRAME,
+}cam_frame_type_t;
+
+
+typedef enum {
+  CAMERA_MODE_2D = (1<<0),
+  CAMERA_MODE_3D = (1<<1),
+  CAMERA_NONZSL_MODE = (1<<2),
+  CAMERA_ZSL_MODE = (1<<3),
+  CAMERA_MODE_MAX = CAMERA_ZSL_MODE,
+} camera_mode_t;
+
+
+typedef struct {
+  int  modes_supported;
+  int8_t camera_id;
+  cam_position_t position;
+  uint32_t sensor_mount_angle;
+}camera_info_t;
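+
+/*
+ * Illustrative sketch (not part of the original header): modes_supported is a
+ * bitmask of camera_mode_t flags, so capability checks are bitwise tests:
+ *
+ *   camera_info_t info;   // filled in by the HAL for a given camera_id
+ *   if (info.modes_supported & CAMERA_ZSL_MODE) {
+ *       // this sensor supports zero-shutter-lag capture
+ *   }
+ */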
+
+typedef struct {
+  camera_mode_t mode;
+  int8_t camera_id;
+  camera_mode_t cammode;
+}config_params_t;
+
+typedef struct {
+  uint32_t len;
+  uint32_t y_offset;
+  uint32_t cbcr_offset;
+} cam_sp_len_offset_t;
+
+typedef struct{
+  uint32_t len;
+  uint32_t offset;
+} cam_mp_len_offset_t;
+
+typedef struct {
+  int num_planes;
+  union {
+    cam_sp_len_offset_t sp;
+    cam_mp_len_offset_t mp[8];
+  };
+  uint32_t frame_len;
+} cam_frame_len_offset_t;
+
+typedef struct {
+  uint32_t parm[MAX_NUM_PARM];
+  uint32_t ops[MAX_NUM_OPS];
+  uint8_t yuv_output;
+  uint8_t jpeg_capture;
+  uint32_t max_pict_width;
+  uint32_t max_pict_height;
+  uint32_t max_preview_width;
+  uint32_t max_preview_height;
+  uint32_t max_video_width;
+  uint32_t max_video_height;
+  uint32_t effect;
+  camera_mode_t modes;
+  uint8_t preview_format;
+  uint32_t preview_sizes_cnt;
+  uint32_t thumb_sizes_cnt;
+  uint32_t video_sizes_cnt;
+  uint32_t hfr_sizes_cnt;
+  uint8_t vfe_output_enable;
+  uint8_t hfr_frame_skip;
+  uint32_t default_preview_width;
+  uint32_t default_preview_height;
+  uint32_t bestshot_reconfigure;
+  uint32_t pxlcode;
+}cam_prop_t;
+
+typedef struct {
+  uint16_t video_width;         /* Video width seen by VFE could be different than orig. Ex. DIS */
+  uint16_t video_height;        /* Video height seen by VFE */
+  uint16_t picture_width;       /* Picture width seen by VFE */
+  uint16_t picture_height;      /* Picture height seen by VFE */
+  uint16_t display_width;       /* width of display */
+  uint16_t display_height;      /* height of display */
+  uint16_t orig_video_width;    /* original video width received */
+  uint16_t orig_video_height;   /* original video height received */
+  uint16_t orig_picture_dx;     /* original picture width received */
+  uint16_t orig_picture_dy;     /* original picture height received */
+  uint16_t ui_thumbnail_height; /* Just like orig_picture_dx */
+  uint16_t ui_thumbnail_width;  /* Just like orig_picture_dy */
+  uint16_t thumbnail_height;
+  uint16_t thumbnail_width;
+  uint16_t orig_picture_width;
+  uint16_t orig_picture_height;
+  uint16_t orig_thumb_width;
+  uint16_t orig_thumb_height;
+  uint16_t raw_picture_height;
+  uint16_t raw_picture_width;
+  uint16_t rdi0_height;
+  uint16_t rdi0_width;
+  uint16_t rdi1_height;
+  uint16_t rdi1_width;
+  uint32_t hjr_xtra_buff_for_bayer_filtering;
+  cam_format_t    prev_format;
+  cam_format_t    enc_format;
+  cam_format_t    thumb_format;
+  cam_format_t    main_img_format;
+  cam_format_t    rdi0_format;
+  cam_format_t    rdi1_format;
+  cam_format_t    raw_img_format;
+  cam_pad_format_t prev_padding_format;
+  cam_pad_format_t enc_padding_format;
+  cam_pad_format_t thumb_padding_format;
+  cam_pad_format_t main_padding_format;
+  uint16_t display_luma_width;
+  uint16_t display_luma_height;
+  uint16_t display_chroma_width;
+  uint16_t display_chroma_height;
+  uint16_t video_luma_width;
+  uint16_t video_luma_height;
+  uint16_t video_chroma_width;
+  uint16_t video_chroma_height;
+  uint16_t thumbnail_luma_width;
+  uint16_t thumbnail_luma_height;
+  uint16_t thumbnail_chroma_width;
+  uint16_t thumbnail_chroma_height;
+  uint16_t main_img_luma_width;
+  uint16_t main_img_luma_height;
+  uint16_t main_img_chroma_width;
+  uint16_t main_img_chroma_height;
+  int rotation;
+  cam_frame_len_offset_t display_frame_offset;
+  cam_frame_len_offset_t video_frame_offset;
+  cam_frame_len_offset_t picture_frame_offset;
+  cam_frame_len_offset_t thumb_frame_offset;
+  uint32_t channel_interface_mask;
+} cam_ctrl_dimension_t;
+
+typedef struct {
+  uint16_t type;
+  uint16_t width;
+  uint16_t height;
+} cam_stats_buf_dimension_t;
+
+typedef struct {
+  uint8_t cid;
+  uint8_t dt;
+  uint32_t inst_handle;
+} cam_cid_entry_t;
+
+#define CAM_MAX_CID_NUM    8
+typedef struct {
+  /* should we hard-code max CIDs? If not, we need two CMDs */
+  uint8_t num_cids;
+  cam_cid_entry_t cid_entries[CAM_MAX_CID_NUM];
+} cam_cid_info_t;
+
+typedef struct {
+  /* we still use prev, video, main,
+   * thumb to interpret image types */
+  uint32_t image_mode;                 /* input */
+  cam_format_t format;                 /* input */
+  cam_pad_format_t padding_format;     /* input */
+  int rotation;                        /* input */
+  uint16_t width;                      /* input/output */
+  uint16_t height;                     /* input/output */
+  cam_frame_len_offset_t frame_offset; /* output */
+} cam_frame_resolution_t;
+
+typedef struct {
+  uint32_t instance_hdl; /* instance handler of the stream */
+  uint32_t frame_idx;    /* frame index */
+  uint16_t frame_width;
+  uint16_t frame_height;
+  cam_frame_len_offset_t frame_offset;
+} mm_camera_wnr_frame_info_t;
+
+#define MM_CAMEAR_MAX_STRAEM_BUNDLE 4
+typedef struct {
+    uint8_t num_frames;
+    mm_camera_wnr_frame_info_t frames[MM_CAMEAR_MAX_STRAEM_BUNDLE];
+} mm_camera_wnr_info_t;
+
+typedef struct {
+  uint8_t num;
+  uint32_t stream_handles[MM_CAMEAR_MAX_STRAEM_BUNDLE]; /* instance handler */
+} cam_stream_bundle_t;
+
+/* Add enumerations at the bottom but before MM_CAMERA_PARM_MAX */
+typedef enum {
+    MM_CAMERA_PARM_PICT_SIZE,
+    MM_CAMERA_PARM_ZOOM_RATIO,
+    MM_CAMERA_PARM_HISTOGRAM,
+    MM_CAMERA_PARM_DIMENSION,
+    MM_CAMERA_PARM_FPS,
+    MM_CAMERA_PARM_FPS_MODE, /*5*/
+    MM_CAMERA_PARM_EFFECT,
+    MM_CAMERA_PARM_EXPOSURE_COMPENSATION,
+    MM_CAMERA_PARM_EXPOSURE,
+    MM_CAMERA_PARM_SHARPNESS,
+    MM_CAMERA_PARM_CONTRAST, /*10*/
+    MM_CAMERA_PARM_SATURATION,
+    MM_CAMERA_PARM_BRIGHTNESS,
+    MM_CAMERA_PARM_WHITE_BALANCE,
+    MM_CAMERA_PARM_LED_MODE,
+    MM_CAMERA_PARM_ANTIBANDING, /*15*/
+    MM_CAMERA_PARM_ROLLOFF,
+    MM_CAMERA_PARM_CONTINUOUS_AF,
+    MM_CAMERA_PARM_FOCUS_RECT,
+    MM_CAMERA_PARM_AEC_ROI,
+    MM_CAMERA_PARM_AF_ROI, /*20*/
+    MM_CAMERA_PARM_HJR,
+    MM_CAMERA_PARM_ISO,
+    MM_CAMERA_PARM_BL_DETECTION,
+    MM_CAMERA_PARM_SNOW_DETECTION,
+    MM_CAMERA_PARM_BESTSHOT_MODE, /*25*/
+    MM_CAMERA_PARM_ZOOM,
+    MM_CAMERA_PARM_VIDEO_DIS,
+    MM_CAMERA_PARM_VIDEO_ROT,
+    MM_CAMERA_PARM_SCE_FACTOR,
+    MM_CAMERA_PARM_FD, /*30*/
+    MM_CAMERA_PARM_MODE,
+    /* 2nd 32 bits */
+    MM_CAMERA_PARM_3D_FRAME_FORMAT,
+    MM_CAMERA_PARM_CAMERA_ID,
+    MM_CAMERA_PARM_CAMERA_INFO,
+    MM_CAMERA_PARM_PREVIEW_SIZE, /*35*/
+    MM_CAMERA_PARM_QUERY_FALSH4SNAP,
+    MM_CAMERA_PARM_FOCUS_DISTANCES,
+    MM_CAMERA_PARM_BUFFER_INFO,
+    MM_CAMERA_PARM_JPEG_ROTATION,
+    MM_CAMERA_PARM_JPEG_MAINIMG_QUALITY, /* 40 */
+    MM_CAMERA_PARM_JPEG_THUMB_QUALITY,
+    MM_CAMERA_PARM_ZSL_ENABLE,
+    MM_CAMERA_PARM_FOCAL_LENGTH,
+    MM_CAMERA_PARM_HORIZONTAL_VIEW_ANGLE,
+    MM_CAMERA_PARM_VERTICAL_VIEW_ANGLE, /* 45 */
+    MM_CAMERA_PARM_MCE,
+    MM_CAMERA_PARM_RESET_LENS_TO_INFINITY,
+    MM_CAMERA_PARM_SNAPSHOTDATA,
+    MM_CAMERA_PARM_HFR,
+    MM_CAMERA_PARM_REDEYE_REDUCTION, /* 50 */
+    MM_CAMERA_PARM_WAVELET_DENOISE,
+    MM_CAMERA_PARM_3D_DISPLAY_DISTANCE,
+    MM_CAMERA_PARM_3D_VIEW_ANGLE,
+    MM_CAMERA_PARM_PREVIEW_FORMAT,
+    MM_CAMERA_PARM_RDI_FORMAT,
+    MM_CAMERA_PARM_HFR_SIZE, /* 55 */
+    MM_CAMERA_PARM_3D_EFFECT,
+    MM_CAMERA_PARM_3D_MANUAL_CONV_RANGE,
+    MM_CAMERA_PARM_3D_MANUAL_CONV_VALUE,
+    MM_CAMERA_PARM_ENABLE_3D_MANUAL_CONVERGENCE,
+    /* These are new parameters defined here */
+    MM_CAMERA_PARM_CH_IMAGE_FMT, /* 60 */       // mm_camera_ch_image_fmt_parm_t
+    MM_CAMERA_PARM_OP_MODE,             // camera state, sub state also
+    MM_CAMERA_PARM_SHARPNESS_CAP,       //
+    MM_CAMERA_PARM_SNAPSHOT_BURST_NUM,  // num shots per snapshot action
+    MM_CAMERA_PARM_LIVESHOT_MAIN,       // enable/disable full size live shot
+    MM_CAMERA_PARM_MAXZOOM, /* 65 */
+    MM_CAMERA_PARM_LUMA_ADAPTATION,     // enable/disable
+    MM_CAMERA_PARM_HDR,
+    MM_CAMERA_PARM_CROP,
+    MM_CAMERA_PARM_MAX_PICTURE_SIZE,
+    MM_CAMERA_PARM_MAX_PREVIEW_SIZE, /* 70 */
+    MM_CAMERA_PARM_ASD_ENABLE,
+    MM_CAMERA_PARM_RECORDING_HINT,
+    MM_CAMERA_PARM_CAF_ENABLE,
+    MM_CAMERA_PARM_FULL_LIVESHOT,
+    MM_CAMERA_PARM_DIS_ENABLE, /* 75 */
+    MM_CAMERA_PARM_AEC_LOCK,
+    MM_CAMERA_PARM_AWB_LOCK,
+    MM_CAMERA_PARM_AF_MTR_AREA,
+    MM_CAMERA_PARM_AEC_MTR_AREA,
+    MM_CAMERA_PARM_LOW_POWER_MODE,
+    MM_CAMERA_PARM_MAX_HFR_MODE, /* 80 */
+    MM_CAMERA_PARM_MAX_VIDEO_SIZE,
+    MM_CAMERA_PARM_DEF_PREVIEW_SIZES,
+    MM_CAMERA_PARM_DEF_VIDEO_SIZES,
+    MM_CAMERA_PARM_DEF_THUMB_SIZES,
+    MM_CAMERA_PARM_DEF_HFR_SIZES,
+    MM_CAMERA_PARM_PREVIEW_SIZES_CNT,
+    MM_CAMERA_PARM_VIDEO_SIZES_CNT,
+    MM_CAMERA_PARM_THUMB_SIZES_CNT,
+    MM_CAMERA_PARM_HFR_SIZES_CNT,
+    MM_CAMERA_PARM_GRALLOC_USAGE,
+    MM_CAMERA_PARM_VFE_OUTPUT_ENABLE, //to check whether both outputs are
+    MM_CAMERA_PARM_DEFAULT_PREVIEW_WIDTH,
+    MM_CAMERA_PARM_DEFAULT_PREVIEW_HEIGHT,
+    MM_CAMERA_PARM_FOCUS_MODE,
+    MM_CAMERA_PARM_HFR_FRAME_SKIP,
+    MM_CAMERA_PARM_CH_INTERFACE,
+    //or single output enabled to differentiate 7x27a with others
+    MM_CAMERA_PARM_BESTSHOT_RECONFIGURE,
+    MM_CAMERA_PARM_MAX_NUM_FACES_DECT,
+    MM_CAMERA_PARM_FPS_RANGE,
+    MM_CAMERA_PARM_CID,
+    MM_CAMERA_PARM_FRAME_RESOLUTION,
+    MM_CAMERA_PARM_RAW_SNAPSHOT_FMT,
+    MM_CAMERA_PARM_FACIAL_FEATURE_INFO,
+    MM_CAMERA_PARM_MOBICAT,
+    MM_CAMERA_PARM_MAX
+} mm_camera_parm_type_t;
+
+typedef enum {
+  STREAM_NONE           =  0x0,
+  STREAM_IMAGE          =  0x1,
+  STREAM_RAW            =  0x2,
+  STREAM_RAW1           =  0x4,
+  STREAM_RAW2           =  0x8,
+} mm_camera_channel_stream_info_t;
+
+typedef enum {
+  CAMERA_SET_PARM_DISPLAY_INFO,
+  CAMERA_SET_PARM_DIMENSION,
+
+  CAMERA_SET_PARM_ZOOM,
+  CAMERA_SET_PARM_SENSOR_POSITION,
+  CAMERA_SET_PARM_FOCUS_RECT,
+  CAMERA_SET_PARM_LUMA_ADAPTATION,
+  CAMERA_SET_PARM_CONTRAST,
+  CAMERA_SET_PARM_BRIGHTNESS,
+  CAMERA_SET_PARM_EXPOSURE_COMPENSATION,
+  CAMERA_SET_PARM_SHARPNESS,
+  CAMERA_SET_PARM_HUE,  /* 10 */
+  CAMERA_SET_PARM_SATURATION,
+  CAMERA_SET_PARM_EXPOSURE,
+  CAMERA_SET_PARM_AUTO_FOCUS,
+  CAMERA_SET_PARM_WB,
+  CAMERA_SET_PARM_EFFECT,
+  CAMERA_SET_PARM_FPS,
+  CAMERA_SET_PARM_FLASH,
+  CAMERA_SET_PARM_NIGHTSHOT_MODE,
+  CAMERA_SET_PARM_REFLECT,
+  CAMERA_SET_PARM_PREVIEW_MODE,  /* 20 */
+  CAMERA_SET_PARM_ANTIBANDING,
+  CAMERA_SET_PARM_RED_EYE_REDUCTION,
+  CAMERA_SET_PARM_FOCUS_STEP,
+  CAMERA_SET_PARM_EXPOSURE_METERING,
+  CAMERA_SET_PARM_AUTO_EXPOSURE_MODE,
+  CAMERA_SET_PARM_ISO,
+  CAMERA_SET_PARM_BESTSHOT_MODE,
+  CAMERA_SET_PARM_ENCODE_ROTATION,
+
+  CAMERA_SET_PARM_PREVIEW_FPS,
+  CAMERA_SET_PARM_AF_MODE,  /* 30 */
+  CAMERA_SET_PARM_HISTOGRAM,
+  CAMERA_SET_PARM_FLASH_STATE,
+  CAMERA_SET_PARM_FRAME_TIMESTAMP,
+  CAMERA_SET_PARM_STROBE_FLASH,
+  CAMERA_SET_PARM_FPS_LIST,
+  CAMERA_SET_PARM_HJR,
+  CAMERA_SET_PARM_ROLLOFF,
+
+  CAMERA_STOP_PREVIEW,
+  CAMERA_START_PREVIEW,
+  CAMERA_START_SNAPSHOT, /* 40 */
+  CAMERA_START_RAW_SNAPSHOT,
+  CAMERA_STOP_SNAPSHOT,
+  CAMERA_EXIT,
+  CAMERA_ENABLE_BSM,
+  CAMERA_DISABLE_BSM,
+  CAMERA_GET_PARM_ZOOM,
+  CAMERA_GET_PARM_MAXZOOM,
+  CAMERA_GET_PARM_ZOOMRATIOS,
+  CAMERA_GET_PARM_AF_SHARPNESS,
+  CAMERA_SET_PARM_LED_MODE, /* 50 */
+  CAMERA_SET_MOTION_ISO,
+  CAMERA_AUTO_FOCUS_CANCEL,
+  CAMERA_GET_PARM_FOCUS_STEP,
+  CAMERA_ENABLE_AFD,
+  CAMERA_PREPARE_SNAPSHOT,
+  CAMERA_SET_FPS_MODE,
+  CAMERA_START_VIDEO,
+  CAMERA_STOP_VIDEO,
+  CAMERA_START_RECORDING,
+  CAMERA_STOP_RECORDING, /* 60 */
+  CAMERA_SET_VIDEO_DIS_PARAMS,
+  CAMERA_SET_VIDEO_ROT_PARAMS,
+  CAMERA_SET_PARM_AEC_ROI,
+  CAMERA_SET_CAF,
+  CAMERA_SET_PARM_BL_DETECTION_ENABLE,
+  CAMERA_SET_PARM_SNOW_DETECTION_ENABLE,
+  CAMERA_SET_PARM_STROBE_FLASH_MODE,
+  CAMERA_SET_PARM_AF_ROI,
+  CAMERA_START_LIVESHOT,
+  CAMERA_SET_SCE_FACTOR, /* 70 */
+  CAMERA_GET_CAPABILITIES,
+  CAMERA_GET_PARM_DIMENSION,
+  CAMERA_GET_PARM_LED_MODE,
+  CAMERA_SET_PARM_FD,
+  CAMERA_GET_PARM_3D_FRAME_FORMAT,
+  CAMERA_QUERY_FLASH_FOR_SNAPSHOT,
+  CAMERA_GET_PARM_FOCUS_DISTANCES,
+  CAMERA_START_ZSL,
+  CAMERA_STOP_ZSL,
+  CAMERA_ENABLE_ZSL, /* 80 */
+  CAMERA_GET_PARM_FOCAL_LENGTH,
+  CAMERA_GET_PARM_HORIZONTAL_VIEW_ANGLE,
+  CAMERA_GET_PARM_VERTICAL_VIEW_ANGLE,
+  CAMERA_SET_PARM_WAVELET_DENOISE,
+  CAMERA_SET_PARM_MCE,
+  CAMERA_ENABLE_STEREO_CAM,
+  CAMERA_SET_PARM_RESET_LENS_TO_INFINITY,
+  CAMERA_GET_PARM_SNAPSHOTDATA,
+  CAMERA_SET_PARM_HFR,
+  CAMERA_SET_REDEYE_REDUCTION, /* 90 */
+  CAMERA_SET_PARM_3D_DISPLAY_DISTANCE,
+  CAMERA_SET_PARM_3D_VIEW_ANGLE,
+  CAMERA_SET_PARM_3D_EFFECT,
+  CAMERA_SET_PARM_PREVIEW_FORMAT,
+  CAMERA_GET_PARM_3D_DISPLAY_DISTANCE, /* 95 */
+  CAMERA_GET_PARM_3D_VIEW_ANGLE,
+  CAMERA_GET_PARM_3D_EFFECT,
+  CAMERA_GET_PARM_3D_MANUAL_CONV_RANGE,
+  CAMERA_SET_PARM_3D_MANUAL_CONV_VALUE,
+  CAMERA_ENABLE_3D_MANUAL_CONVERGENCE, /* 100 */
+  CAMERA_SET_PARM_HDR,
+  CAMERA_SET_ASD_ENABLE,
+  CAMERA_POSTPROC_ABORT,
+  CAMERA_SET_AEC_MTR_AREA,
+  CAMERA_SET_AEC_LOCK,       /*105*/
+  CAMERA_SET_AWB_LOCK,
+  CAMERA_SET_RECORDING_HINT,
+  CAMERA_SET_PARM_CAF,
+  CAMERA_SET_FULL_LIVESHOT,
+  CAMERA_SET_DIS_ENABLE,  /*110*/
+  CAMERA_GET_PARM_MAX_HFR_MODE,
+  CAMERA_SET_LOW_POWER_MODE,
+  CAMERA_GET_PARM_DEF_PREVIEW_SIZES,
+  CAMERA_GET_PARM_DEF_VIDEO_SIZES,
+  CAMERA_GET_PARM_DEF_THUMB_SIZES, /*115*/
+  CAMERA_GET_PARM_DEF_HFR_SIZES,
+  CAMERA_GET_PARM_MAX_LIVESHOT_SIZE,
+  CAMERA_GET_PARM_FPS_RANGE,
+  CAMERA_SET_3A_CONVERGENCE,
+  CAMERA_SET_PREVIEW_HFR, /*120*/
+  CAMERA_GET_MAX_DIMENSION,
+  CAMERA_GET_MAX_NUM_FACES_DECT,
+  CAMERA_SET_CHANNEL_STREAM,
+  CAMERA_GET_CHANNEL_STREAM,
+  CAMERA_SET_PARM_CID, /*125*/
+  CAMERA_GET_PARM_FRAME_RESOLUTION,
+  CAMERA_GET_FACIAL_FEATURE_INFO,
+  CAMERA_GET_PP_MASK, /* get post-processing mask */
+  CAMERA_DO_PP_WNR,   /* do post-process WNR */
+  CAMERA_GET_PARM_HDR,
+  CAMERA_SEND_PP_PIPELINE_CMD, /* send offline pp cmd */
+  CAMERA_SET_BUNDLE, /* set stream bundle */
+  CAMERA_ENABLE_MOBICAT,
+  CAMERA_GET_PARM_MOBICAT,
+  CAMERA_CTRL_PARM_MAX
+} cam_ctrl_type;
+
+typedef enum {
+  CAMERA_ERROR_NO_MEMORY,
+  CAMERA_ERROR_EFS_FAIL,                /* Low-level operation failed */
+  CAMERA_ERROR_EFS_FILE_OPEN,           /* File already opened */
+  CAMERA_ERROR_EFS_FILE_NOT_OPEN,       /* File not opened */
+  CAMERA_ERROR_EFS_FILE_ALREADY_EXISTS, /* File already exists */
+  CAMERA_ERROR_EFS_NONEXISTENT_DIR,     /* User directory doesn't exist */
+  CAMERA_ERROR_EFS_NONEXISTENT_FILE,    /* User directory doesn't exist */
+  CAMERA_ERROR_EFS_BAD_FILE_NAME,       /* Client specified invalid file/directory name*/
+  CAMERA_ERROR_EFS_BAD_FILE_HANDLE,     /* Client specified invalid file/directory name*/
+  CAMERA_ERROR_EFS_SPACE_EXHAUSTED,     /* Out of file system space */
+  CAMERA_ERROR_EFS_OPEN_TABLE_FULL,     /* Out of open-file table slots                */
+  CAMERA_ERROR_EFS_OTHER_ERROR,         /* Other error                                 */
+  CAMERA_ERROR_CONFIG,
+  CAMERA_ERROR_EXIF_ENCODE,
+  CAMERA_ERROR_VIDEO_ENGINE,
+  CAMERA_ERROR_IPL,
+  CAMERA_ERROR_INVALID_FORMAT,
+  CAMERA_ERROR_TIMEOUT,
+  CAMERA_ERROR_ESD,
+  CAMERA_ERROR_MAX
+} camera_error_type;
+
+#if defined CAMERA_ANTIBANDING_OFF
+#undef CAMERA_ANTIBANDING_OFF
+#endif
+
+#if defined CAMERA_ANTIBANDING_60HZ
+#undef CAMERA_ANTIBANDING_60HZ
+#endif
+
+#if defined CAMERA_ANTIBANDING_50HZ
+#undef CAMERA_ANTIBANDING_50HZ
+#endif
+
+#if defined CAMERA_ANTIBANDING_AUTO
+#undef CAMERA_ANTIBANDING_AUTO
+#endif
+
+typedef enum {
+  CAMERA_PP_MASK_TYPE_WNR = 0x01
+} camera_pp_mask_type;
+
+typedef enum {
+  CAMERA_ANTIBANDING_OFF,
+  CAMERA_ANTIBANDING_60HZ,
+  CAMERA_ANTIBANDING_50HZ,
+  CAMERA_ANTIBANDING_AUTO,
+  CAMERA_ANTIBANDING_AUTO_50HZ,
+  CAMERA_ANTIBANDING_AUTO_60HZ,
+  CAMERA_MAX_ANTIBANDING,
+} camera_antibanding_type;
+
+/* Enum Type for different ISO Mode supported */
+typedef enum {
+  CAMERA_ISO_AUTO = 0,
+  CAMERA_ISO_DEBLUR,
+  CAMERA_ISO_100,
+  CAMERA_ISO_200,
+  CAMERA_ISO_400,
+  CAMERA_ISO_800,
+  CAMERA_ISO_1600,
+  CAMERA_ISO_MAX
+} camera_iso_mode_type;
+
+typedef enum {
+  MM_CAMERA_FACIAL_FEATURE_FD, // facial detection
+  MM_CAMERA_FACIAL_FEATURE_MAX
+} camera_facial_features;
+
+typedef enum {
+  AEC_ROI_OFF,
+  AEC_ROI_ON
+} aec_roi_ctrl_t;
+
+typedef enum {
+  AEC_ROI_BY_INDEX,
+  AEC_ROI_BY_COORDINATE,
+} aec_roi_type_t;
+
+typedef struct {
+  uint32_t x;
+  uint32_t y;
+} cam_coordinate_type_t;
+
+/*
+ * Define DRAW_RECTANGLES to draw rectangles on screen. Just for test purpose.
+ */
+//#define DRAW_RECTANGLES
+
+typedef struct {
+  uint16_t x;
+  uint16_t y;
+  uint16_t dx;
+  uint16_t dy;
+} roi_t;
+
+typedef struct {
+  aec_roi_ctrl_t aec_roi_enable;
+  aec_roi_type_t aec_roi_type;
+  union {
+    cam_coordinate_type_t coordinate;
+    uint32_t aec_roi_idx;
+  } aec_roi_position;
+} cam_set_aec_roi_t;
+
+typedef struct {
+  uint32_t frm_id;
+  uint8_t num_roi;
+  roi_t roi[MAX_ROI];
+  uint8_t is_multiwindow;
+} roi_info_t;
+
+/* Exif Tag Data Type */
+typedef enum
+{
+    EXIF_BYTE      = 1,
+    EXIF_ASCII     = 2,
+    EXIF_SHORT     = 3,
+    EXIF_LONG      = 4,
+    EXIF_RATIONAL  = 5,
+    EXIF_UNDEFINED = 7,
+    EXIF_SLONG     = 9,
+    EXIF_SRATIONAL = 10
+} exif_tag_type_t;
+
+
+/* Exif Rational Data Type */
+typedef struct
+{
+    uint32_t  num;    // Numerator
+    uint32_t  denom;  // Denominator
+
+} rat_t;
+
+/* Exif Signed Rational Data Type */
+typedef struct
+{
+    int32_t  num;    // Numerator
+    int32_t  denom;  // Denominator
+
+} srat_t;
+
+typedef struct
+{
+  exif_tag_type_t type;
+  uint8_t copy;
+  uint32_t count;
+  union
+  {
+    char      *_ascii;
+    uint8_t   *_bytes;
+    uint8_t    _byte;
+    uint16_t  *_shorts;
+    uint16_t   _short;
+    uint32_t  *_longs;
+    uint32_t   _long;
+    rat_t     *_rats;
+    rat_t      _rat;
+    uint8_t   *_undefined;
+    int32_t   *_slongs;
+    int32_t    _slong;
+    srat_t    *_srats;
+    srat_t     _srat;
+  } data;
+} exif_tag_entry_t;
+
+typedef struct {
+    uint32_t      tag_id;
+    exif_tag_entry_t  tag_entry;
+} exif_tags_info_t;
+
+
+typedef enum {
+ HDR_BRACKETING_OFF,
+ HDR_MODE,
+ EXP_BRACKETING_MODE
+ } hdr_mode;
+
+typedef struct {
+  hdr_mode mode;
+  uint32_t hdr_enable;
+  uint32_t total_frames;
+  uint32_t total_hal_frames;
+  char values[MAX_EXP_BRACKETING_LENGTH];  /* user defined values */
+} exp_bracketing_t;
+typedef struct {
+  roi_t      mtr_area[MAX_ROI];
+  uint32_t   num_area;
+  int        weight[MAX_ROI];
+} aec_mtr_area_t;
+
+typedef struct {
+  int denoise_enable;
+  int process_plates;
+} denoise_param_t;
+
+#ifndef HAVE_CAMERA_SIZE_TYPE
+  #define HAVE_CAMERA_SIZE_TYPE
+struct camera_size_type {
+  int width;
+  int height;
+};
+#endif
+
+typedef struct {
+  uint32_t yoffset;
+  uint32_t cbcr_offset;
+  uint32_t size;
+  struct camera_size_type resolution;
+}cam_buf_info_t;
+
+typedef struct {
+  int x;
+  int y;
+}cam_point_t;
+
+typedef struct {
+  /* AF parameters */
+  uint8_t focus_position;
+  /* AEC parameters */
+  uint32_t line_count;
+  uint8_t luma_target;
+  /* AWB parameters */
+  int32_t r_gain;
+  int32_t b_gain;
+  int32_t g_gain;
+  uint8_t exposure_mode;
+  uint8_t exposure_program;
+  float exposure_time;
+  uint32_t iso_speed;
+} snapshotData_info_t;
+
+
+typedef enum {
+  CAMERA_HFR_MODE_OFF = 1,
+  CAMERA_HFR_MODE_60FPS,
+  CAMERA_HFR_MODE_90FPS,
+  CAMERA_HFR_MODE_120FPS,
+  CAMERA_HFR_MODE_150FPS,
+} camera_hfr_mode_t;
+
+/* frame Q*/
+struct fifo_node
+{
+  struct fifo_node *next;
+  void *f;
+};
+
+struct fifo_queue
+{
+  int num_of_frames;
+  struct fifo_node *front;
+  struct fifo_node *back;
+  pthread_mutex_t mut;
+  pthread_cond_t wait;
+  char* name;
+};
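+
+/*
+ * Illustrative sketch (not part of the original header) of enqueueing a frame
+ * on the fifo_queue above; enqueue_frame is a hypothetical helper:
+ *
+ *   static void enqueue_frame(struct fifo_queue *q, void *frame)
+ *   {
+ *       struct fifo_node *node = (struct fifo_node *)malloc(sizeof(*node));
+ *       node->f = frame;
+ *       node->next = NULL;
+ *       pthread_mutex_lock(&q->mut);
+ *       if (q->back)
+ *           q->back->next = node;   // append to the tail
+ *       else
+ *           q->front = node;        // queue was empty
+ *       q->back = node;
+ *       q->num_of_frames++;
+ *       pthread_cond_signal(&q->wait);  // wake a consumer blocked on q->wait
+ *       pthread_mutex_unlock(&q->mut);
+ *   }
+ */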
+
+typedef struct {
+  uint32_t buf_len;
+  uint8_t num;
+  uint8_t pmem_type;
+  uint32_t vaddr[8];
+} mm_camera_histo_mem_info_t;
+
+typedef enum {
+  MM_CAMERA_CTRL_EVT_ZOOM_DONE,
+  MM_CAMERA_CTRL_EVT_AUTO_FOCUS_DONE,
+  MM_CAMERA_CTRL_EVT_PREP_SNAPSHOT,
+  MM_CAMERA_CTRL_EVT_SNAPSHOT_CONFIG_DONE,
+  MM_CAMERA_CTRL_EVT_WDN_DONE, // wavelet denoise done
+  MM_CAMERA_CTRL_EVT_HDR_DONE,
+  MM_CAMERA_CTRL_EVT_ERROR,
+  MM_CAMERA_CTRL_EVT_MAX
+}mm_camera_ctrl_event_type_t;
+
+typedef struct {
+  mm_camera_ctrl_event_type_t evt;
+  cam_ctrl_status_t status;
+  unsigned long cookie;
+} mm_camera_ctrl_event_t;
+
+typedef enum {
+  MM_CAMERA_CH_EVT_STREAMING_ON,
+  MM_CAMERA_CH_EVT_STREAMING_OFF,
+  MM_CAMERA_CH_EVT_STREAMING_ERR,
+  MM_CAMERA_CH_EVT_DATA_DELIVERY_DONE,
+  MM_CAMERA_CH_EVT_DATA_REQUEST_MORE,
+  MM_CAMERA_CH_EVT_MAX
+}mm_camera_ch_event_type_t;
+
+typedef struct {
+  uint32_t ch;
+  mm_camera_ch_event_type_t evt;
+} mm_camera_ch_event_t;
+
+typedef struct {
+  uint32_t index;
+  /* TBD: need more fields for histo stats? */
+} mm_camera_stats_histo_t;
+
+typedef struct  {
+  uint32_t event_id;
+  union {
+    mm_camera_stats_histo_t    stats_histo;
+  } e;
+} mm_camera_stats_event_t;
+
+typedef enum {
+  FD_ROI_TYPE_HEADER,
+  FD_ROI_TYPE_DATA
+} fd_roi_type_t;
+
+typedef struct {
+  int fd_mode;
+  int num_fd;
+} fd_set_parm_t;
+
+typedef struct {
+  uint32_t frame_id;
+  int16_t num_face_detected;
+} fd_roi_header_type;
+
+struct fd_rect_t {
+  uint16_t x;
+  uint16_t y;
+  uint16_t dx;
+  uint16_t dy;
+};
+
+typedef struct {
+  struct fd_rect_t face_boundary;
+  uint16_t left_eye_center[2];
+  uint16_t right_eye_center[2];
+  uint16_t mouth_center[2];
+  uint8_t smile_degree;  //0 -100
+  uint8_t smile_confidence;  //
+  uint8_t blink_detected;  // 0 or 1
+  uint8_t is_face_recognised;  // 0 or 1
+  int8_t gaze_angle;  // -90 -45 0 45 90 for head left to right tilt
+  int8_t updown_dir;  // -90 to 90
+  int8_t leftright_dir;  //-90 to 90
+  int8_t roll_dir;  // -90 to 90
+  int8_t left_right_gaze;  // -50 to 50
+  int8_t top_bottom_gaze;  // -50 to 50
+  uint8_t left_blink;  // 0 - 100
+  uint8_t right_blink;  // 0 - 100
+  int8_t id;  // unique id for face tracking within view unless view changes
+  int8_t score;  // score of confidence (0 - 100)
+} fd_face_type;
+
+typedef struct {
+  uint32_t frame_id;
+  uint8_t idx;
+  fd_face_type face;
+} fd_roi_data_type;
+
+struct fd_roi_t {
+  fd_roi_type_t type;
+  union {
+    fd_roi_header_type hdr;
+    fd_roi_data_type data;
+  } d;
+};
+
+typedef struct  {
+  uint32_t event_id;
+  union {
+    mm_camera_histo_mem_info_t histo_mem_info;
+    struct fd_roi_t roi;
+  } e;
+} mm_camera_info_event_t;
+
+typedef struct  {
+  uint32_t trans_id;   /* transaction id */
+  uint32_t evt_type;   /* event type */
+  int32_t data_length; /* the length of valid data */
+  uint8_t evt_data[1]; /* buffer that holds the content of private event, must be flatten */
+} mm_camera_private_event_t;
+
+typedef enum {
+  MM_CAMERA_EVT_TYPE_CH,
+  MM_CAMERA_EVT_TYPE_CTRL,
+  MM_CAMERA_EVT_TYPE_STATS,
+  MM_CAMERA_EVT_TYPE_INFO,
+  MM_CAMERA_EVT_TYPE_PRIVATE_EVT,
+  MM_CAMERA_EVT_TYPE_MAX
+} mm_camera_event_type_t;
+
+typedef struct {
+  mm_camera_event_type_t event_type;
+  union {
+    mm_camera_ch_event_t ch;
+    mm_camera_ctrl_event_t ctrl;
+    mm_camera_stats_event_t stats;
+    mm_camera_info_event_t info;
+    mm_camera_private_event_t pri_evt;
+  } e;
+} mm_camera_event_t;
+
+typedef enum {
+  MM_CAMERA_REPRO_CMD_INVALID,
+  MM_CAMERA_REPRO_CMD_OPEN,
+  MM_CAMERA_REPRO_CMD_CONFIG,
+  MM_CAMERA_REPRO_CMD_ATTACH_DETACH,
+  MM_CAMERA_REPRO_CMD_START_STOP,
+  MM_CAMERA_REPRO_CMD_REPROCESS,
+  MM_CAMERA_REPRO_CMD_CLOSE,
+  MM_CAMERA_REPRO_CMD_MAX
+} mmcam_repro_cmd_type_t;
+
+/* re-process isp type definition */
+typedef enum {
+  MM_CAMERA_REPRO_ISP_NOT_USED,
+  MM_CAMERA_REPRO_ISP_PIX,
+  MM_CAMERA_REPRO_ISP_CROP_AND_SCALING,
+  MM_CAMERA_REPRO_ISP_COLOR_CONVERSION,
+  MM_CAMERA_REPRO_ISP_DNOISE_AND_SHARPNESS,
+  MM_CAMERA_REPRO_ISP_MAX_NUM
+} mm_camera_repro_isp_type_t;
+
+typedef struct {
+  uint32_t addr_offset;
+  uint32_t length;
+  uint32_t data_offset;
+} mm_camera_repro_plane_t;
+
+typedef struct {
+  uint32_t repro_handle;  /* repro isp handle */
+  uint32_t inst_handle; /* instance handle */
+  int8_t   buf_idx;     /* buffer index    */
+  uint32_t frame_id;    /* frame id        */
+  uint32_t frame_len;   /* frame length    */
+  int8_t   num_planes;
+  mm_camera_repro_plane_t planes[VIDEO_MAX_PLANES];
+  struct timeval timestamp;
+} mm_camera_repro_cmd_reprocess_t;
+
+#define MM_CAMERA_MAX_NUM_REPROCESS_DEST 2
+
+typedef struct {
+  uint8_t  isp_type;      /* in: mm_camera_repro_isp_type_t */
+  uint32_t repro_handle;  /* out */
+} mm_camera_repro_cmd_open_t;
+
+typedef struct {
+  int image_mode;
+  int width;
+  int height;
+  cam_format_t format;
+  uint32_t inst_handle; /* stream handler */
+} mm_camera_repro_config_data_t;
+
+typedef struct {
+  uint32_t repro_handle;
+  int num_dest;
+  mm_camera_repro_config_data_t src;
+  mm_camera_repro_config_data_t dest[MM_CAMERA_MAX_NUM_REPROCESS_DEST];
+} mm_camera_repro_cmd_config_t;
+
+typedef struct {
+  uint32_t repro_handle;   /* repro isp handle */
+  uint32_t inst_handle;    /* instance handle of dest stream */
+  uint8_t  attach_flag;    /* flag: attach(TRUE)/detach(FALSE) */
+} mm_camera_repro_cmd_attach_detach_t;
+
+typedef struct {
+  uint32_t repro_handle;   /* repro isp handle */
+  uint32_t dest_handle;    /* Which destination to start/stop */
+  uint8_t  start_flag;     /* flag: start isp(TRUE)/stop isp(FALSE) */
+} mm_camera_repro_cmd_start_stop_t;
+
+typedef struct {
+  /* mm_camera_repro_cmd_type_t */
+  int cmd;
+  /* Union of the possible payloads for
+   * this reprocess command. */
+  union {
+    /* MM_CAMERA_REPRO_CMD_OPEN */
+    mm_camera_repro_cmd_open_t open;
+    /* MM_CAMERA_REPRO_CMD_CONFIG */
+    mm_camera_repro_cmd_config_t config;
+    /* MM_CAMERA_REPRO_CMD_ATTACH_DETACH */
+    mm_camera_repro_cmd_attach_detach_t attach_detach;
+    /* MM_CAMERA_REPRO_CMD_REPROCESS */
+    mm_camera_repro_cmd_reprocess_t reprocess;
+    /* MM_CAMERA_REPRO_CMD_START_STOP */
+    mm_camera_repro_cmd_start_stop_t start_stop;
+    /* MM_CAMERA_REPRO_CMD_CLOSE */
+    uint32_t repro_handle;
+  } payload;
+} mm_camera_repro_cmd_t;
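+
+/*
+ * Illustrative sketch (not part of the original header), assuming the caller
+ * wants to open a PIX reprocess path; the handle is returned in payload.open:
+ *
+ *   mm_camera_repro_cmd_t cmd;
+ *   memset(&cmd, 0, sizeof(cmd));
+ *   cmd.cmd = MM_CAMERA_REPRO_CMD_OPEN;
+ *   cmd.payload.open.isp_type = MM_CAMERA_REPRO_ISP_PIX;
+ *   // once the command completes, cmd.payload.open.repro_handle holds the
+ *   // repro isp handle used by the CONFIG/ATTACH/START commands that follow
+ */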
+
+typedef struct {
+  /*input parameter*/
+  int enable;
+  /*output parameter*/
+  uint32_t mobicat_size;
+}mm_cam_mobicat_info_t;
+
+#define MAX_MOBICAT_SIZE 8092
+
+/*
+  WARNING: Since this data structure is huge,
+  never use it as a local variable; otherwise it is easy to cause a
+  stack overflow.
+  Always use malloc to allocate heap memory for it.
+*/
+typedef struct {
+  int max_len;   //telling the client the max size of the tags buffer (MAX_MOBICAT_SIZE)
+  int data_len;  //client returns the real size, including the null "\0"
+  char tags[MAX_MOBICAT_SIZE];
+} cam_exif_tags_t;
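+
+/*
+ * Illustrative sketch (not part of the original header), following the warning
+ * above: the structure is heap-allocated rather than placed on the stack:
+ *
+ *   cam_exif_tags_t *tags = (cam_exif_tags_t *)malloc(sizeof(cam_exif_tags_t));
+ *   if (tags != NULL) {
+ *       memset(tags, 0, sizeof(cam_exif_tags_t));
+ *       tags->max_len = MAX_MOBICAT_SIZE;
+ *       // ... fill/query the tags, then free(tags) when done
+ *   }
+ */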
+
+/******************************************************************************
+ * Function: exif_set_tag
+ * Description: Inserts or modifies an Exif tag to the Exif Info object. Typical
+ *              use is to call this function multiple times - to insert all the
+ *              desired Exif Tags individually to the Exif Info object and
+ *              then pass the info object to the Jpeg Encoder object so
+ *              the inserted tags would be emitted as tags in the Exif header.
+ * Input parameters:
+ *   obj       - The Exif Info object where the tag would be inserted to or
+ *               modified from.
+ *   tag_id    - The Exif Tag ID of the tag to be inserted/modified.
+ *   p_entry   - The pointer to the tag entry structure which contains the
+ *               details of the tag. The pointer can be set to NULL to undo a
+ *               previous insertion for a certain tag.
+ * Return values:
+ *     JPEGERR_SUCCESS
+ *     JPEGERR_ENULLPTR
+ *     JPEGERR_EFAILED
+ * (See jpegerr.h for description of error values.)
+ * Notes: none
+ *****************************************************************************/
+int exif_set_tag(exif_info_obj_t    obj,
+                 exif_tag_id_t      tag_id,
+                 exif_tag_entry_t  *p_entry);
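+
+/*
+ * Illustrative usage sketch (not part of the original header), assuming an
+ * already-initialized exif_info_obj_t named exif_info (EXIFTAGID_MAKE is
+ * defined in qexif.h):
+ *
+ *   exif_tag_entry_t entry;
+ *   entry.type        = EXIF_ASCII;
+ *   entry.copy        = 1;                    // let the object keep its own copy
+ *   entry.count       = strlen("QCAM") + 1;   // ASCII count includes the '\0'
+ *   entry.data._ascii = "QCAM";
+ *   int rc = exif_set_tag(exif_info, EXIFTAGID_MAKE, &entry);
+ *   // rc is JPEGERR_SUCCESS on success (see jpegerr.h)
+ */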
+
+
+#endif /* __QCAMERA_INTF_H__ */
diff --git a/camera/common.mk b/camera/common.mk
new file mode 100644
index 0000000..a872679
--- /dev/null
+++ b/camera/common.mk
@@ -0,0 +1,9 @@
+common_deps :=
+kernel_includes :=
+
+ifeq ($(call is-vendor-board-platform,QCOM),true)
+ifeq ($(TARGET_COMPILE_WITH_MSM_KERNEL),true)
+    common_deps += $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr
+    kernel_includes += $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr/include
+endif
+endif
diff --git a/camera/mm-image-codec/Android.mk b/camera/mm-image-codec/Android.mk
new file mode 100644
index 0000000..19b1346
--- /dev/null
+++ b/camera/mm-image-codec/Android.mk
@@ -0,0 +1,3 @@
+ifeq ($(TARGET_ARCH),$(filter $(TARGET_ARCH),arm arm64))
+include $(call all-subdir-makefiles)
+endif
diff --git a/camera/mm-image-codec/qexif/qexif.h b/camera/mm-image-codec/qexif/qexif.h
new file mode 100644
index 0000000..91aedde
--- /dev/null
+++ b/camera/mm-image-codec/qexif/qexif.h
@@ -0,0 +1,1728 @@
+/*Copyright (c) 2012, The Linux Foundation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+    * Neither the name of The Linux Foundation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.*/
+
+
+#ifndef __QEXIF_H__
+#define __QEXIF_H__
+
+#include <stdio.h>
+
+/* Exif Info (opaque definition) */
+struct exif_info_t;
+typedef struct exif_info_t * exif_info_obj_t;
+
+/* Exif Tag ID */
+typedef uint32_t exif_tag_id_t;
+
+
+/* Exif Rational Data Type */
+typedef struct
+{
+    uint32_t  num;    // Numerator
+    uint32_t  denom;  // Denominator
+
+} rat_t;
+
+/* Exif Signed Rational Data Type */
+typedef struct
+{
+    int32_t  num;    // Numerator
+    int32_t  denom;  // Denominator
+
+} srat_t;
+
+/* Exif Tag Data Type */
+typedef enum
+{
+    EXIF_BYTE      = 1,
+    EXIF_ASCII     = 2,
+    EXIF_SHORT     = 3,
+    EXIF_LONG      = 4,
+    EXIF_RATIONAL  = 5,
+    EXIF_UNDEFINED = 7,
+    EXIF_SLONG     = 9,
+    EXIF_SRATIONAL = 10
+} exif_tag_type_t;
+
+/* Exif Tag Entry
+ * Used in exif_set_tag as an input argument and
+ * in exif_get_tag as an output argument. */
+typedef struct
+{
+    /* The Data Type of the Tag *
+     * Rational, etc */
+    exif_tag_type_t type;
+
+    /* Copy
+     * This field is used when a user passes this structure to
+     * be stored in an exif_info_t via the exif_set_tag method.
+     * The routine looks at this field and decides whether
+     * it is necessary to make a copy of the data pointed to by this
+     * structure (all string and array types).
+     * If this field is set to false, only a pointer to the actual
+     * data is retained and it is the caller's responsibility to
+     * ensure the validity of the data before the exif_info_t object
+     * is destroyed.
+     */
+    uint8_t copy;
+
+    /* Data count
+     * This indicates the number of elements of the data. For example, if
+     * the type is EXIF_BYTE and the count is 1, that means the actual data
+     * is one byte and is accessible by data._byte. If the type is EXIF_BYTE
+     * and the count is more than one, the actual data is contained in an
+     * array and is accessible by data._bytes. In case of EXIF_ASCII, it
+     * indicates the string length and in case of EXIF_UNDEFINED, it indicates
+     * the length of the array.
+     */
+    uint32_t count;
+
+    /* Data
+     * A union which covers all possible data types. The user should pick
+     * the right field to use depending on the data type and the count.
+     * See in-line comment below.
+     */
+    union
+    {
+        char      *_ascii;      // EXIF_ASCII (count indicates string length)
+        uint8_t   *_bytes;      // EXIF_BYTE  (count > 1)
+        uint8_t    _byte;       // EXIF_BYTE  (count = 1)
+        uint16_t  *_shorts;     // EXIF_SHORT (count > 1)
+        uint16_t   _short;      // EXIF_SHORT (count = 1)
+        uint32_t  *_longs;      // EXIF_LONG  (count > 1)
+        uint32_t   _long;       // EXIF_LONG  (count = 1)
+        rat_t     *_rats;       // EXIF_RATIONAL  (count > 1)
+        rat_t      _rat;        // EXIF_RATIONAL  (count = 1)
+        uint8_t   *_undefined;  // EXIF_UNDEFINED (count indicates length)
+        int32_t   *_slongs;     // EXIF_SLONG (count > 1)
+        int32_t    _slong;      // EXIF_SLONG (count = 1)
+        srat_t    *_srats;      // EXIF_SRATIONAL (count > 1)
+        srat_t     _srat;       // EXIF_SRATIONAL (count = 1)
+
+    } data;
+
+} exif_tag_entry_t;
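+
+/* Illustrative sketch (not part of the original header): filling an entry for
+ * a single EXIF_RATIONAL value, e.g. an exposure time of 1/30 s:
+ *
+ *   exif_tag_entry_t entry;
+ *   entry.type  = EXIF_RATIONAL;
+ *   entry.copy  = 1;   // request an internal copy of the data
+ *   entry.count = 1;   // a single rational, so data._rat (not data._rats) is used
+ *   entry.data._rat.num   = 1;
+ *   entry.data._rat.denom = 30;
+ */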
+
+/* =======================================================================
+**                          Macro Definitions
+** ======================================================================= */
+/* Enum defined to let compiler generate unique offset numbers for different
+ * tags - ordering matters! NOT INTENDED to be used by any application. */
+typedef enum
+{
+    // GPS IFD
+    GPS_VERSION_ID = 0,
+    GPS_LATITUDE_REF,
+    GPS_LATITUDE,
+    GPS_LONGITUDE_REF,
+    GPS_LONGITUDE,
+    GPS_ALTITUDE_REF,
+    GPS_ALTITUDE,
+    GPS_TIMESTAMP,
+    GPS_SATELLITES,
+    GPS_STATUS,
+    GPS_MEASUREMODE,
+    GPS_DOP,
+    GPS_SPEED_REF,
+    GPS_SPEED,
+    GPS_TRACK_REF,
+    GPS_TRACK,
+    GPS_IMGDIRECTION_REF,
+    GPS_IMGDIRECTION,
+    GPS_MAPDATUM,
+    GPS_DESTLATITUDE_REF,
+    GPS_DESTLATITUDE,
+    GPS_DESTLONGITUDE_REF,
+    GPS_DESTLONGITUDE,
+    GPS_DESTBEARING_REF,
+    GPS_DESTBEARING,
+    GPS_DESTDISTANCE_REF,
+    GPS_DESTDISTANCE,
+    GPS_PROCESSINGMETHOD,
+    GPS_AREAINFORMATION,
+    GPS_DATESTAMP,
+    GPS_DIFFERENTIAL,
+
+    // TIFF IFD
+    NEW_SUBFILE_TYPE,
+    SUBFILE_TYPE,
+    IMAGE_WIDTH,
+    IMAGE_LENGTH,
+    BITS_PER_SAMPLE,
+    COMPRESSION,
+    PHOTOMETRIC_INTERPRETATION,
+    THRESH_HOLDING,
+    CELL_WIDTH,
+    CELL_HEIGHT,
+    FILL_ORDER,
+    DOCUMENT_NAME,
+    IMAGE_DESCRIPTION,
+    MAKE,
+    MODEL,
+    STRIP_OFFSETS,
+    ORIENTATION,
+    SAMPLES_PER_PIXEL,
+    ROWS_PER_STRIP,
+    STRIP_BYTE_COUNTS,
+    MIN_SAMPLE_VALUE,
+    MAX_SAMPLE_VALUE,
+    X_RESOLUTION,
+    Y_RESOLUTION,
+    PLANAR_CONFIGURATION,
+    PAGE_NAME,
+    X_POSITION,
+    Y_POSITION,
+    FREE_OFFSET,
+    FREE_BYTE_COUNTS,
+    GRAY_RESPONSE_UNIT,
+    GRAY_RESPONSE_CURVE,
+    T4_OPTION,
+    T6_OPTION,
+    RESOLUTION_UNIT,
+    PAGE_NUMBER,
+    TRANSFER_FUNCTION,
+    SOFTWARE,
+    DATE_TIME,
+    ARTIST,
+    HOST_COMPUTER,
+    PREDICTOR,
+    WHITE_POINT,
+    PRIMARY_CHROMATICITIES,
+    COLOR_MAP,
+    HALFTONE_HINTS,
+    TILE_WIDTH,
+    TILE_LENGTH,
+    TILE_OFFSET,
+    TILE_BYTE_COUNTS,
+    INK_SET,
+    INK_NAMES,
+    NUMBER_OF_INKS,
+    DOT_RANGE,
+    TARGET_PRINTER,
+    EXTRA_SAMPLES,
+    SAMPLE_FORMAT,
+    TRANSFER_RANGE,
+    JPEG_PROC,
+    JPEG_INTERCHANGE_FORMAT,
+    JPEG_INTERCHANGE_FORMAT_LENGTH,
+    JPEG_RESTART_INTERVAL,
+    JPEG_LOSSLESS_PREDICTORS,
+    JPEG_POINT_TRANSFORMS,
+    JPEG_Q_TABLES,
+    JPEG_DC_TABLES,
+    JPEG_AC_TABLES,
+    YCBCR_COEFFICIENTS,
+    YCBCR_SUB_SAMPLING,
+    YCBCR_POSITIONING,
+    REFERENCE_BLACK_WHITE,
+    GAMMA,
+    ICC_PROFILE_DESCRIPTOR,
+    SRGB_RENDERING_INTENT,
+    IMAGE_TITLE,
+    COPYRIGHT,
+    EXIF_IFD,
+    ICC_PROFILE,
+    GPS_IFD,
+
+
+    // TIFF IFD (Thumbnail)
+    TN_IMAGE_WIDTH,
+    TN_IMAGE_LENGTH,
+    TN_BITS_PER_SAMPLE,
+    TN_COMPRESSION,
+    TN_PHOTOMETRIC_INTERPRETATION,
+    TN_IMAGE_DESCRIPTION,
+    TN_MAKE,
+    TN_MODEL,
+    TN_STRIP_OFFSETS,
+    TN_ORIENTATION,
+    TN_SAMPLES_PER_PIXEL,
+    TN_ROWS_PER_STRIP,
+    TN_STRIP_BYTE_COUNTS,
+    TN_X_RESOLUTION,
+    TN_Y_RESOLUTION,
+    TN_PLANAR_CONFIGURATION,
+    TN_RESOLUTION_UNIT,
+    TN_TRANSFER_FUNCTION,
+    TN_SOFTWARE,
+    TN_DATE_TIME,
+    TN_ARTIST,
+    TN_WHITE_POINT,
+    TN_PRIMARY_CHROMATICITIES,
+    TN_JPEGINTERCHANGE_FORMAT,
+    TN_JPEGINTERCHANGE_FORMAT_L,
+    TN_YCBCR_COEFFICIENTS,
+    TN_YCBCR_SUB_SAMPLING,
+    TN_YCBCR_POSITIONING,
+    TN_REFERENCE_BLACK_WHITE,
+    TN_COPYRIGHT,
+
+    // EXIF IFD
+    EXPOSURE_TIME,
+    F_NUMBER,
+    EXPOSURE_PROGRAM,
+    SPECTRAL_SENSITIVITY,
+    ISO_SPEED_RATING,
+    OECF,
+    EXIF_VERSION,
+    EXIF_DATE_TIME_ORIGINAL,
+    EXIF_DATE_TIME_DIGITIZED,
+    EXIF_COMPONENTS_CONFIG,
+    EXIF_COMPRESSED_BITS_PER_PIXEL,
+    SHUTTER_SPEED,
+    APERTURE,
+    BRIGHTNESS,
+    EXPOSURE_BIAS_VALUE,
+    MAX_APERTURE,
+    SUBJECT_DISTANCE,
+    METERING_MODE,
+    LIGHT_SOURCE,
+    FLASH,
+    FOCAL_LENGTH,
+    SUBJECT_AREA,
+    EXIF_MAKER_NOTE,
+    EXIF_USER_COMMENT,
+    SUBSEC_TIME,
+    SUBSEC_TIME_ORIGINAL,
+    SUBSEC_TIME_DIGITIZED,
+    EXIF_FLASHPIX_VERSION,
+    EXIF_COLOR_SPACE,
+    EXIF_PIXEL_X_DIMENSION,
+    EXIF_PIXEL_Y_DIMENSION,
+    RELATED_SOUND_FILE,
+    INTEROP,
+    FLASH_ENERGY,
+    SPATIAL_FREQ_RESPONSE,
+    FOCAL_PLANE_X_RESOLUTION,
+    FOCAL_PLANE_Y_RESOLUTION,
+    FOCAL_PLANE_RESOLUTION_UNIT,
+    SUBJECT_LOCATION,
+    EXPOSURE_INDEX,
+    SENSING_METHOD,
+    FILE_SOURCE,
+    SCENE_TYPE,
+    CFA_PATTERN,
+    CUSTOM_RENDERED,
+    EXPOSURE_MODE,
+    WHITE_BALANCE,
+    DIGITAL_ZOOM_RATIO,
+    FOCAL_LENGTH_35MM,
+    SCENE_CAPTURE_TYPE,
+    GAIN_CONTROL,
+    CONTRAST,
+    SATURATION,
+    SHARPNESS,
+    DEVICE_SETTINGS_DESCRIPTION,
+    SUBJECT_DISTANCE_RANGE,
+    IMAGE_UID,
+    PIM,
+
+    EXIF_TAG_MAX_OFFSET
+
+} exif_tag_offset_t;
+
+/* Below are the supported Tags (ID and structure for their data) */
+#define CONSTRUCT_TAGID(offset,ID) (offset << 16 | ID)
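+
+// For example (illustration only, not part of the original header),
+// EXIFTAGID_GPS_LATITUDE below expands to CONSTRUCT_TAGID(GPS_LATITUDE, 0x0002):
+// the exif_tag_offset_t enum value goes in the upper 16 bits and the standard
+// Exif tag number 0x0002 goes in the lower 16 bits.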
+
+// GPS tag version
+// Use EXIFTAGTYPE_GPS_VERSION_ID as the exif_tag_type (EXIF_BYTE)
+// Count should be 4
+#define _ID_GPS_VERSION_ID 0x0000
+#define EXIFTAGID_GPS_VERSION_ID \
+  CONSTRUCT_TAGID(GPS_VERSION_ID, _ID_GPS_VERSION_ID)
+#define EXIFTAGTYPE_GPS_VERSION_ID EXIF_BYTE
+// North or South Latitude
+// Use EXIFTAGTYPE_GPS_LATITUDE_REF as the exif_tag_type (EXIF_ASCII)
+// It should be 2 characters long including the null-terminating character.
+#define _ID_GPS_LATITUDE_REF 0x0001
+#define EXIFTAGID_GPS_LATITUDE_REF \
+  CONSTRUCT_TAGID(GPS_LATITUDE_REF, _ID_GPS_LATITUDE_REF)
+#define EXIFTAGTYPE_GPS_LATITUDE_REF EXIF_ASCII
+// Latitude
+// Use EXIFTAGTYPE_GPS_LATITUDE as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 3
+#define _ID_GPS_LATITUDE 0x0002
+#define EXIFTAGID_GPS_LATITUDE CONSTRUCT_TAGID(GPS_LATITUDE, _ID_GPS_LATITUDE)
+#define EXIFTAGTYPE_GPS_LATITUDE EXIF_RATIONAL
+// East or West Longitude
+// Use EXIFTAGTYPE_GPS_LONGITUDE_REF as the exif_tag_type (EXIF_ASCII)
+// It should be 2 characters long including the null-terminating character.
+#define _ID_GPS_LONGITUDE_REF 0x0003
+#define EXIFTAGID_GPS_LONGITUDE_REF \
+  CONSTRUCT_TAGID(GPS_LONGITUDE_REF, _ID_GPS_LONGITUDE_REF)
+#define EXIFTAGTYPE_GPS_LONGITUDE_REF EXIF_ASCII
+// Longitude
+// Use EXIFTAGTYPE_GPS_LONGITUDE as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 3
+#define _ID_GPS_LONGITUDE 0x0004
+#define EXIFTAGID_GPS_LONGITUDE \
+  CONSTRUCT_TAGID(GPS_LONGITUDE, _ID_GPS_LONGITUDE)
+#define EXIFTAGTYPE_GPS_LONGITUDE EXIF_RATIONAL
+// Altitude reference
+// Use EXIFTAGTYPE_GPS_ALTITUDE_REF as the exif_tag_type (EXIF_BYTE)
+#define _ID_GPS_ALTITUDE_REF 0x0005
+#define EXIFTAGID_GPS_ALTITUDE_REF \
+  CONSTRUCT_TAGID(GPS_ALTITUDE_REF, _ID_GPS_ALTITUDE_REF)
+#define EXIFTAGTYPE_GPS_ALTITUDE_REF EXIF_BYTE
+// Altitude
+// Use EXIFTAGTYPE_GPS_ALTITUDE as the exif_tag_type (EXIF_RATIONAL)
+#define _ID_GPS_ALTITUDE 0x0006
+#define EXIFTAGID_GPS_ALTITUDE CONSTRUCT_TAGID(GPS_ALTITUDE, _ID_GPS_ALTITUDE)
+#define EXIFTAGTYPE_GPS_ALTITUE EXIF_RATIONAL
+// GPS time (atomic clock)
+// Use EXIFTAGTYPE_GPS_TIMESTAMP as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 3
+#define _ID_GPS_TIMESTAMP 0x0007
+#define EXIFTAGID_GPS_TIMESTAMP \
+  CONSTRUCT_TAGID(GPS_TIMESTAMP, _ID_GPS_TIMESTAMP)
+#define EXIFTAGTYPE_GPS_TIMESTAMP EXIF_RATIONAL
+// GPS Satellites
+// Use EXIFTAGTYPE_GPS_SATELLITES as the exif_tag_type (EXIF_ASCII)
+// Count can be anything.
+#define _ID_GPS_SATELLITES 0x0008
+#define EXIFTAGID_GPS_SATELLITES \
+ CONSTRUCT_TAGID(GPS_SATELLITES, _ID_GPS_SATELLITES)
+#define EXIFTAGTYPE_GPS_SATELLITES EXIF_ASCII
+// GPS Status
+// Use EXIFTAGTYPE_GPS_STATUS as the exif_tag_type (EXIF_ASCII)
+// It should be 2 characters long including the null-terminating character.
+// "A" - Measurement in progress
+// "V" - Measurement Interoperability
+// Other - Reserved
+#define _ID_GPS_STATUS 0x0009
+#define EXIFTAGID_GPS_STATUS CONSTRUCT_TAGID(GPS_STATUS, _ID_GPS_STATUS)
+#define EXIFTATTYPE_GPS_STATUS EXIF_ASCII
+// GPS Measure Mode
+// Use EXIFTAGTYPE_GPS_MEASUREMODE as the exif_tag_type (EXIF_ASCII)
+// It should be 2 characters long including the null-terminating character.
+// "2" - 2-dimensional measurement
+// "3" - 3-dimensional measurement
+// Other - Reserved
+#define _ID_GPS_MEASUREMODE 0x000a
+#define EXIFTAGID_GPS_MEASUREMODE \
+  CONSTRUCT_TAGID(GPS_MEASUREMODE, _ID_GPS_MEASUREMODE)
+#define EXIFTAGTYPE_GPS_MEASUREMODE EXIF_ASCII
+// GPS Measurement precision (DOP)
+// Use EXIFTAGTYPE_GPS_DOP as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_GPS_DOP 0x000b
+#define EXIFTAGID_GPS_DOP CONSTRUCT_TAGID(GPS_DOP, _ID_GPS_DOP)
+#define EXIFTAGTYPE_GPS_DOP EXIF_RATIONAL
+// Speed Unit
+// Use EXIFTAGTYPE_GPS_SPEED_REF as the exif_tag_type (EXIF_ASCII)
+// It should be 2 characters long including the null-terminating character.
+// "K" - Kilometers per hour
+// "M" - Miles per hour
+// "N" - Knots
+// Other - Reserved
+#define _ID_GPS_SPEED_REF 0x000c
+#define EXIFTAGID_GPS_SPEED_REF \
+  CONSTRUCT_TAGID(GPS_SPEED_REF, _ID_GPS_SPEED_REF)
+#define EXIFTAGTYPE_GPS_SPEED_REF EXIF_ASCII
+// Speed of GPS receiver
+// Use EXIFTAGTYPE_GPS_SPEED as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_GPS_SPEED 0x000d
+#define EXIFTAGID_GPS_SPEED CONSTRUCT_TAGID(GPS_SPEED, _ID_GPS_SPEED)
+#define EXIFTAGTYPE_GPS_SPEED EXIF_RATIONAL
+// Reference of direction of movement
+// Use EXIFTAGTYPE_GPS_TRACK_REF as the exif_tag_type (EXIF_ASCII)
+// It should be 2 characters long including the null-terminating character.
+// "T" - True direction
+// "M" - Magnetic direction
+// Other - Reserved
+#define _ID_GPS_TRACK_REF 0x000e
+#define EXIFTAGID_GPS_TRACK_REF \
+  CONSTRUCT_TAGID(GPS_TRACK_REF, _ID_GPS_TRACK_REF)
+#define EXIFTAGTYPE_GPS_TRACK_REF EXIF_ASCII
+// Direction of movement
+// Use EXIFTAGTYPE_GPS_TRACK as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_GPS_TRACK 0x000f
+#define EXIFTAGID_GPS_TRACK CONSTRUCT_TAGID(GPS_TRACK, _ID_GPS_TRACK)
+#define EXIFTAGTYPE_GPS_TRACK EXIF_RATIONAL
+// Reference of direction of image
+// Use EXIFTAGTYPE_GPS_IMGDIRECTION_REF as the exif_tag_type (EXIF_ASCII)
+// It should be 2 characters long including the null-terminating character.
+// "T" - True direction
+// "M" - Magnetic direction
+// Other - Reserved
+#define _ID_GPS_IMGDIRECTION_REF 0x0010
+#define EXIFTAGID_GPS_IMGDIRECTION_REF \
+  CONSTRUCT_TAGID(GPS_IMGDIRECTION_REF, _ID_GPS_IMGDIRECTION_REF)
+#define EXIFTAGTYPE_GPS_IMGDIRECTION_REF EXIF_ASCII
+// Direction of image
+// Use EXIFTAGTYPE_GPS_IMGDIRECTION as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_GPS_IMGDIRECTION 0x0011
+#define EXIFTAGID_GPS_IMGDIRECTION \
+  CONSTRUCT_TAGID(GPS_IMGDIRECTION, _ID_GPS_IMGDIRECTION)
+#define EXIFTAGTYPE_GPS_IMGDIRECTION EXIF_RATIONAL
+// Geodetic survey data used
+// Use EXIFTAGTYPE_GPS_MAPDATUM as the exif_tag_type (EXIF_ASCII)
+// Count can be any
+#define _ID_GPS_MAPDATUM 0x0012
+#define EXIFTAGID_GPS_MAPDATUM CONSTRUCT_TAGID(GPS_MAPDATUM, _ID_GPS_MAPDATUM)
+#define EXIFTAGTYPE_GPS_MAPDATUM EXIF_ASCII
+// Reference for latitude of destination
+// Use EXIFTAGTYPE_GPS_DESTLATITUDE_REF as the exif_tag_type (EXIF_ASCII)
+// It should be 2 characters long including the null-terminating character.
+// "N" - North latitude
+// "S" - South latitude
+// Other - Reserved
+#define _ID_GPS_DESTLATITUDE_REF 0x0013
+#define EXIFTAGID_GPS_DESTLATITUDE_REF \
+  CONSTRUCT_TAGID(GPS_DESTLATITUDE_REF, _ID_GPS_DESTLATITUDE_REF)
+#define EXIFTAGTYPE_GPS_DESTLATITUDE_REF EXIF_ASCII
+// Latitude of destination
+// Use EXIFTAGTYPE_GPS_DESTLATITUDE as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 3
+#define _ID_GPS_DESTLATITUDE 0x0014
+#define EXIFTAGID_GPS_DESTLATITUDE \
+  CONSTRUCT_TAGID(GPS_DESTLATITUDE, _ID_GPS_DESTLATITUDE)
+#define EXIFTAGTYPE_GPS_DESTLATITUDE EXIF_RATIONAL
+// Reference for longitude of destination
+// Use EXIFTAGTYPE_GPS_DESTLONGITUDE_REF as the exif_tag_type (EXIF_ASCII)
+// It should be 2 characters long including the null-terminating character.
+// "E" - East longitude
+// "W" - West longitude
+// Other - Reserved
+#define _ID_GPS_DESTLONGITUDE_REF 0x0015
+#define EXIFTAGID_GPS_DESTLONGITUDE_REF \
+  CONSTRUCT_TAGID(GPS_DESTLONGITUDE_REF, _ID_GPS_DESTLONGITUDE_REF)
+#define EXIFTAGTYPE_GPS_DESTLONGITUDE_REF EXIF_ASCII
+// Longitude of destination
+// Use EXIFTAGTYPE_GPS_DESTLONGITUDE as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 3
+#define _ID_GPS_DESTLONGITUDE 0x0016
+#define EXIFTAGID_GPS_DESTLONGITUDE CONSTRUCT_TAGID(GPS_DESTLONGITUDE, _ID_GPS_DESTLONGITUDE)
+#define EXIFTAGTYPE_GPS_DESTLONGITUDE EXIF_RATIONAL
+// Reference for bearing of destination
+// Use EXIFTAGTYPE_GPS_DESTBEARING_REF as the exif_tag_type (EXIF_ASCII)
+// It should be 2 characters long including the null-terminating character.
+// "T" - True direction
+// "M" - Magnetic direction
+// Other - Reserved
+#define _ID_GPS_DESTBEARING_REF 0x0017
+#define EXIFTAGID_GPS_DESTBEARING_REF \
+  CONSTRUCT_TAGID(GPS_DESTBEARING_REF, _ID_GPS_DESTBEARING_REF)
+#define EXIFTAGTYPE_GPS_DESTBEARING_REF EXIF_ASCII
+// Bearing of destination
+// Use EXIFTAGTYPE_GPS_DESTBEARING as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_GPS_DESTBEARING 0x0018
+#define EXIFTAGID_GPS_DESTBEARING \
+  CONSTRUCT_TAGID(GPS_DESTBEARING, _ID_GPS_DESTBEARING)
+#define EXIFTAGTYPE_GPS_DESTBEARING EXIF_RATIONAL
+// Reference for distance to destination
+// Use EXIFTAGTYPE_GPS_DESTDISTANCE_REF as the exif_tag_type (EXIF_ASCII)
+// It should be 2 characters long including the null-terminating character.
+// "K" - Kilometers per hour
+// "M" - Miles per hour
+// "N" - Knots
+// Other - Reserved
+#define _ID_GPS_DESTDISTANCE_REF 0x0019
+#define EXIFTAGID_GPS_DESTDISTANCE_REF \
+  CONSTRUCT_TAGID(GPS_DESTDISTANCE_REF, _ID_GPS_DESTDISTANCE_REF)
+#define EXIFTAGTYPE_GPS_DESTDISTANCE_REF EXIF_ASCII
+// Distance to destination
+// Use EXIFTAGTYPE_GPS_DESTDISTANCE as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_GPS_DESTDISTANCE 0x001a
+#define EXIFTAGID_GPS_DESTDISTANCE \
+  CONSTRUCT_TAGID(GPS_DESTDISTANCE, _ID_GPS_DESTDISTANCE)
+#define EXIFTAGTYPE_GPS_DESTDISTANCE EXIF_RATIONAL
+// Name of GPS processing method
+// Use EXIFTAGTYPE_GPS_PROCESSINGMETHOD as the exif_tag_type (EXIF_UNDEFINED)
+// Count can be any
+#define _ID_GPS_PROCESSINGMETHOD 0x001b
+#define EXIFTAGID_GPS_PROCESSINGMETHOD \
+  CONSTRUCT_TAGID(GPS_PROCESSINGMETHOD, _ID_GPS_PROCESSINGMETHOD)
+#define EXIFTAGTYPE_GPS_PROCESSINGMETHOD EXIF_UNDEFINED
+// Name of GPS area
+// Use EXIFTAGTYPE_GPS_AREAINFORMATION as the exif_tag_type (EXIF_UNDEFINED)
+// Count can be any
+#define _ID_GPS_AREAINFORMATION 0x001c
+#define EXIFTAGID_GPS_AREAINFORMATION \
+  CONSTRUCT_TAGID(GPS_AREAINFORMATION, _ID_GPS_AREAINFORMATION)
+#define EXIFTAGTYPE_GPS_AREAINFORMATION EXIF_UNDEFINED
+// GPS date
+// Use EXIFTAGTYPE_GPS_DATESTAMP as the exif_tag_type (EXIF_ASCII)
+// It should be 11 characters long including the null-terminating character.
+#define _ID_GPS_DATESTAMP 0x001d
+#define EXIFTAGID_GPS_DATESTAMP \
+  CONSTRUCT_TAGID(GPS_DATESTAMP, _ID_GPS_DATESTAMP)
+#define EXIFTAGTYPE_GPS_DATESTAMP EXIF_ASCII
+// GPS differential correction
+// Use EXIFTAGTYPE_GPS_DIFFERENTIAL as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+// 0 - Measurement without differential correction
+// 1 - Differential correction applied
+// Other - Reserved
+#define _ID_GPS_DIFFERENTIAL 0x001e
+#define EXIFTAGID_GPS_DIFFERENTIAL \
+  CONSTRUCT_TAGID(GPS_DIFFERENTIAL, _ID_GPS_DIFFERENTIAL)
+#define EXIFTAGTYPE_GPS_DIFFERENTIAL EXIF_SHORT
+// Image width
+// Use EXIFTAGTYPE_IMAGE_WIDTH as the exif_tag_type (EXIF_LONG)
+// Count should be 1
+#define _ID_IMAGE_WIDTH 0x0100
+#define EXIFTAGID_IMAGE_WIDTH CONSTRUCT_TAGID(IMAGE_WIDTH, _ID_IMAGE_WIDTH)
+#define EXIFTAGTYPE_IMAGE_WIDTH EXIF_LONG
+// Image height
+// Use EXIFTAGTYPE_IMAGE_LENGTH as the exif_tag_type (EXIF_SHORT_OR_LONG)
+// Count should be 1
+#define _ID_IMAGE_LENGTH 0x0101
+#define EXIFTAGID_IMAGE_LENGTH CONSTRUCT_TAGID(IMAGE_LENGTH, _ID_IMAGE_LENGTH)
+#define EXIFTAGTYPE_IMAGE_LENGTH EXIF_LONG
+// Number of bits per component
+// Use EXIFTAGTYPE_BITS_PER_SAMPLE as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_BITS_PER_SAMPLE 0x0102
+#define EXIFTAGID_BITS_PER_SAMPLE \
+  CONSTRUCT_TAGID(BITS_PER_SAMPLE, _ID_BITS_PER_SAMPLE)
+#define EXIFTAGTYPE_BITS_PER_SAMPLE EXIF_SHORT
+// Compression scheme
+// Use EXIFTAGTYPE_COMPRESSION as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_COMPRESSION 0x0103
+#define EXIFTAGID_COMPRESSION CONSTRUCT_TAGID(COMPRESSION, _ID_COMPRESSION)
+#define EXIFTAGTYPE_COMPRESSION EXIF_SHORT
+// Pixel composition
+// Use EXIFTAGTYPE_PHOTOMETRIC_INTERPRETATION as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_PHOTOMETRIC_INTERPRETATION 0x0106
+#define EXIFTAGID_PHOTOMETRIC_INTERPRETATION \
+  CONSTRUCT_TAGID(PHOTOMETRIC_INTERPRETATION, _ID_PHOTOMETRIC_INTERPRETATION)
+#define EXIFTAGTYPE_PHOTOMETRIC_INTERPRETATION EXIF_SHORT
+
+// Thresholding
+// Use EXIFTAGTYPE_THRESH_HOLDING as the exif_tag_type (EXIF_SHORT)
+//
+//1 = No dithering or halftoning
+//2 = Ordered dither or halftone
+//3 = Randomized dither
+#define _ID_THRESH_HOLDING 0x0107
+#define EXIFTAGID_THRESH_HOLDING \
+  CONSTRUCT_TAGID(THRESH_HOLDING, _ID_THRESH_HOLDING)
+#define EXIFTAGTYPE_THRESH_HOLDING EXIF_SHORT
+
+// Cell Width
+// Use EXIFTAGTYPE_CELL_WIDTH as the exif_tag_type (EXIF_SHORT)
+//
+#define _ID_CELL_WIDTH 0x0108
+#define EXIFTAGID_CELL_WIDTH CONSTRUCT_TAGID(CELL_WIDTH, _ID_CELL_WIDTH)
+#define EXIFTAGTYPE_CELL_WIDTH EXIF_SHORT
+// Cell Height
+// Use EXIFTAGTYPE_CELL_HEIGHT as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_CELL_HEIGHT 0x0109
+#define EXIFTAGID_CELL_HEIGHT CONSTRUCT_TAGID(CELL_HEIGHT, _ID_CELL_HEIGHT)
+#define EXIFTAGTYPE_CELL_HEIGHT EXIF_SHORT
+// Fill Order
+// Use EXIFTAGTYPE_FILL_ORDER as the exif_tag_type (EXIF_SHORT)
+// 1 = Normal
+// 2 = Reversed
+#define _ID_FILL_ORDER 0x010A
+#define EXIFTAGID_FILL_ORDER CONSTRUCT_TAGID(FILL_ORDER, _ID_FILL_ORDER)
+#define EXIFTAGTYPE_FILL_ORDER EXIF_SHORT
+
+// DOCUMENT NAME
+// Use EXIFTAGTYPE_DOCUMENT_NAME as the exif_tag_type (EXIF_ASCII)
+//
+#define _ID_DOCUMENT_NAME 0x010D
+#define EXIFTAGID_DOCUMENT_NAME CONSTRUCT_TAGID(DOCUMENT_NAME, _ID_DOCUMENT_NAME)
+#define EXIFTAGTYPE_DOCUMENT_NAME EXIF_ASCII
+
+// Image title
+// Use EXIFTAGTYPE_IMAGE_DESCRIPTION as the exif_tag_type (EXIF_ASCII)
+// Count can be any
+#define _ID_IMAGE_DESCRIPTION 0x010e
+#define EXIFTAGID_IMAGE_DESCRIPTION \
+  CONSTRUCT_TAGID(IMAGE_DESCRIPTION, _ID_IMAGE_DESCRIPTION)
+#define EXIFTAGTYPE_IMAGE_DESCRIPTION EXIF_ASCII
+// Image input equipment manufacturer
+// Use EXIFTAGTYPE_MAKE as the exif_tag_type (EXIF_ASCII)
+// Count can be any
+#define _ID_MAKE 0x010f
+#define EXIFTAGID_MAKE CONSTRUCT_TAGID(MAKE, _ID_MAKE)
+#define EXIFTAGTYPE_MAKE EXIF_ASCII
+// Image input equipment model
+// Use EXIFTAGTYPE_MODEL as the exif_tag_type (EXIF_ASCII)
+// Count can be any
+#define _ID_MODEL 0x0110
+#define EXIFTAGID_MODEL CONSTRUCT_TAGID(MODEL, _ID_MODEL)
+#define EXIFTAGTYPE_MODEL EXIF_ASCII
+// Image data location
+// Use EXIFTAGTYPE_STRIP_OFFSETS as the exif_tag_type (EXIF_LONG)
+// Count = StripsPerImage                    when PlanarConfiguration = 1
+//       = SamplesPerPixel * StripsPerImage  when PlanarConfiguration = 2
+#define _ID_STRIP_OFFSETS 0x0111
+#define EXIFTAGID_STRIP_OFFSETS \
+  CONSTRUCT_TAGID(STRIP_OFFSETS, _ID_STRIP_OFFSETS)
+#define EXIFTAGTYPE_STRIP_OFFSETS EXIF_LONG
+// Orientation of image
+// Use EXIFTAGTYPE_ORIENTATION as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_ORIENTATION 0x0112
+#define EXIFTAGID_ORIENTATION CONSTRUCT_TAGID(ORIENTATION, _ID_ORIENTATION)
+#define EXIFTAGTYPE_ORIENTATION EXIF_SHORT
+// Number of components
+// Use EXIFTAGTYPE_SAMPLES_PER_PIXEL as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_SAMPLES_PER_PIXEL 0x0115
+#define EXIFTAGID_SAMPLES_PER_PIXEL \
+  CONSTRUCT_TAGID(SAMPLES_PER_PIXEL, _ID_SAMPLES_PER_PIXEL)
+#define EXIFTAGTYPE_SAMPLES_PER_PIXEL EXIF_SHORT
+// Number of rows per strip
+// Use EXIFTAGTYPE_ROWS_PER_STRIP as the exif_tag_type (EXIF_LONG)
+// Count should be 1
+#define _ID_ROWS_PER_STRIP 0x0116
+#define EXIFTAGID_ROWS_PER_STRIP \
+  CONSTRUCT_TAGID(ROWS_PER_STRIP, _ID_ROWS_PER_STRIP)
+#define EXIFTAGTYPE_ROWS_PER_STRIP EXIF_LONG
+// Bytes per compressed strip
+// Use EXIFTAGTYPE_STRIP_BYTE_COUNTS as the exif_tag_type (EXIF_LONG)
+// Count = StripsPerImage                    when PlanarConfiguration = 1
+//       = SamplesPerPixel * StripsPerImage  when PlanarConfiguration = 2
+#define _ID_STRIP_BYTE_COUNTS 0x0117
+#define EXIFTAGID_STRIP_BYTE_COUNTS \
+  CONSTRUCT_TAGID(STRIP_BYTE_COUNTS, _ID_STRIP_BYTE_COUNTS)
+#define EXIFTAGTYPE_STRIP_BYTE_COUNTS EXIF_LONG
+// MinSampleValue
+// Use EXIFTAGTYPE_MIN_SAMPLE_VALUE as the exif_tag_type (EXIF_SHORT)
+#define _ID_MIN_SAMPLE_VALUE 0x0118
+#define EXIFTAGID_MIN_SAMPLE_VALUE  \
+  CONSTRUCT_TAGID(MIN_SAMPLE_VALUE, _ID_MIN_SAMPLE_VALUE)
+#define EXIFTAGTYPE_MIN_SAMPLE_VALUE EXIF_SHORT
+// MaxSampleValue
+// Use EXIFTAGTYPE_MAX_SAMPLE_VALUE as the exif_tag_type (EXIF_SHORT)
+#define _ID_MAX_SAMPLE_VALUE 0x0119
+#define EXIFTAGID_MAX_SAMPLE_VALUE CONSTRUCT_TAGID(MAX_SAMPLE_VALUE, _ID_MAX_SAMPLE_VALUE)
+#define EXIFTAGTYPE_MAX_SAMPLE_VALUE EXIF_SHORT
+
+// Image resolution in width direction
+// Use EXIFTAGTYPE_X_RESOLUTION as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_X_RESOLUTION 0x011a
+#define EXIFTAGID_X_RESOLUTION \
+  CONSTRUCT_TAGID(X_RESOLUTION, _ID_X_RESOLUTION)
+#define EXIFTAGTYPE_X_RESOLUTION EXIF_RATIONAL
+// Image resolution in height direction
+// Use EXIFTAGTYPE_Y_RESOLUTION as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_Y_RESOLUTION 0x011b
+#define EXIFTAGID_Y_RESOLUTION \
+  CONSTRUCT_TAGID(Y_RESOLUTION, _ID_Y_RESOLUTION)
+#define EXIFTAGTYPE_Y_RESOLUTION EXIF_RATIONAL
+// Image data arrangement
+// Use EXIFTAGTYPE_PLANAR_CONFIGURATION as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_PLANAR_CONFIGURATION 0x011c
+#define EXIFTAGID_PLANAR_CONFIGURATION \
+  CONSTRUCT_TAGID(PLANAR_CONFIGURATION, _ID_PLANAR_CONFIGURATION)
+#define EXIFTAGTYPE_PLANAR_CONFIGURATION EXIF_SHORT
+// PageName
+// Use EXIFTAGTYPE_PAGE_NAME as the exif_tag_type (EXIF_ASCII)
+// Count should be 1
+#define _ID_PAGE_NAME 0x011d
+#define EXIFTAGID_PAGE_NAME CONSTRUCT_TAGID(PAGE_NAME, _ID_PAGE_NAME)
+#define EXIFTAGTYPE_PAGE_NAME EXIF_ASCII
+// XPosition
+// Use EXIFTAGTYPE_X_POSITION as the exif_tag_type (EXIF_RATIONAL)
+//
+#define _ID_X_POSITION 0x011e
+#define EXIFTAGID_X_POSITION CONSTRUCT_TAGID(X_POSITION, _ID_X_POSITION)
+#define EXIFTAGTYPE_X_POSITION EXIF_RATIONAL
+// YPosition
+// Use EXIFTAGTYPE_Y_POSITION as the exif_tag_type (EXIF_RATIONAL)
+//
+#define _ID_Y_POSITION 0x011f
+#define EXIFTAGID_Y_POSITION CONSTRUCT_TAGID(Y_POSITION, _ID_Y_POSITION)
+#define EXIFTAGTYPE_Y_POSITION EXIF_RATIONAL
+
+// FREE_OFFSET
+// Use EXIFTAGTYPE_FREE_OFFSET as the exif_tag_type (EXIF_LONG)
+//
+#define _ID_FREE_OFFSET 0x0120
+#define EXIFTAGID_FREE_OFFSET CONSTRUCT_TAGID(FREE_OFFSET, _ID_FREE_OFFSET)
+#define EXIFTAGTYPE_FREE_OFFSET EXIF_LONG
+// FREE_BYTE_COUNTS
+// Use EXIFTAGTYPE_FREE_BYTE_COUNTS as the exif_tag_type (EXIF_LONG)
+//
+#define _ID_FREE_BYTE_COUNTS 0x0121
+#define EXIFTAGID_FREE_BYTE_COUNTS \
+  CONSTRUCT_TAGID(FREE_BYTE_COUNTS, _ID_FREE_BYTE_COUNTS)
+#define EXIFTAGTYPE_FREE_BYTE_COUNTS EXIF_LONG
+
+// GrayResponseUnit
+// Use EXIFTAGTYPE_GRAY_RESPONSE_UNIT as the exif_tag_type (EXIF_SHORT)
+//
+#define _ID_GRAY_RESPONSE_UNIT 0x0122
+#define EXIFTAGID_GRAY_RESPONSE_UNIT \
+  CONSTRUCT_TAGID(GRAY_RESPONSE_UNIT, _ID_GRAY_RESPONSE_UNIT)
+#define EXIFTAGTYPE_GRAY_RESPONSE_UNIT EXIF_SHORT
+// GrayResponseCurve
+// Use EXIFTAGTYPE_GRAY_RESPONSE_CURVE  as the exif_tag_type (EXIF_SHORT)
+//
+#define _ID_GRAY_RESPONSE_CURVE 0x0123
+#define EXIFTAGID_GRAY_RESPONSE_CURVE \
+  CONSTRUCT_TAGID(GRAY_RESPONSE_CURVE, _ID_GRAY_RESPONSE_CURVE)
+#define EXIFTAGTYPE_GRAY_RESPONSE_CURVE EXIF_SHORT
+
+// T4_OPTION
+// Use EXIFTAGTYPE_T4_OPTION as the exif_tag_type (EXIF_LONG)
+//
+#define _ID_T4_OPTION  0x0124
+#define EXIFTAGID_T4_OPTION CONSTRUCT_TAGID(T4_OPTION, _ID_T4_OPTION)
+#define EXIFTAGTYPE_T4_OPTION EXIF_LONG
+// T6_OPTION
+// Use EXIFTAGTYPE_T6_OPTION as the exif_tag_type (EXIF_LONG)
+//
+#define _ID_T6_OPTION 0x0125
+#define EXIFTAGID_T6_OPTION CONSTRUCT_TAGID(T6_OPTION, _ID_T6_OPTION)
+#define EXIFTAGTYPE_T6_OPTION EXIF_LONG
+
+// Unit of X and Y resolution
+// Use EXIFTAGTYPE_RESOLUTION_UNIT as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_RESOLUTION_UNIT 0x0128
+#define EXIFTAGID_RESOLUTION_UNIT \
+  CONSTRUCT_TAGID(RESOLUTION_UNIT, _ID_RESOLUTION_UNIT)
+#define EXIFTAGTYPE_RESOLUTION_UNIT EXIF_SHORT
+
+// Page Number
+// Use EXIFTAGTYPE_PAGE_NUMBER  as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_PAGE_NUMBER 0x0129
+#define EXIFTAGID_PAGE_NUMBER CONSTRUCT_TAGID(PAGE_NUMBER, _ID_PAGE_NUMBER)
+#define EXIFTAGTYPE_PAGE_NUMBER EXIF_SHORT
+// Transfer function
+// Use EXIFTAGTYPE_TRANSFER_FUNCTION as the exif_tag_type (EXIF_SHORT)
+// Count should be 3*256
+#define _ID_TRANSFER_FUNCTION 0x012d
+#define EXIFTAGID_TRANSFER_FUNCTION \
+  CONSTRUCT_TAGID(TRANSFER_FUNCTION, _ID_TRANSFER_FUNCTION)
+#define EXIFTAGTYPE_TRANSFER_FUNCTION EXIF_SHORT
+// Software used
+// Use EXIFTAGTYPE_SOFTWARE as the exif_tag_type (EXIF_ASCII)
+// Count can be any
+#define _ID_SOFTWARE 0x0131
+#define EXIFTAGID_SOFTWARE CONSTRUCT_TAGID(SOFTWARE, _ID_SOFTWARE)
+#define EXIFTAGTYPE_SOFTWARE EXIF_ASCII
+// File change date and time
+// Use EXIFTAGTYPE_DATE_TIME as the exif_tag_type (EXIF_ASCII)
+// Count should be 20
+#define _ID_DATE_TIME 0x0132
+#define EXIFTAGID_DATE_TIME CONSTRUCT_TAGID(DATE_TIME, _ID_DATE_TIME)
+#define EXIFTAGTYPE_DATE_TIME EXIF_ASCII
+// ARTIST, person who created this image
+// Use EXIFTAGTYPE_ARTIST as the exif_tag_type (EXIF_ASCII)
+// Count can be any
+#define _ID_ARTIST 0x013b
+#define EXIFTAGID_ARTIST CONSTRUCT_TAGID(ARTIST, _ID_ARTIST)
+#define EXIFTAGTYPE_ARTIST EXIF_ASCII
+// Host Computer Name
+// Use EXIFTAGTYPE_HOST_COMPUTER as the exif_tag_type (EXIF_ASCII)
+// Count can be any
+#define _ID_HOST_COMPUTER 0x013c
+#define EXIFTAGID_HOST_COMPUTER \
+  CONSTRUCT_TAGID(HOST_COMPUTER, _ID_HOST_COMPUTER)
+#define EXIFTAGTYPE_HOST_COMPUTER EXIF_ASCII
+// Predictor
+// Use EXIFTAGTYPE_PREDICTOR as the exif_tag_type (EXIF_SHORT)
+// Count can be any
+#define _ID_PREDICTOR 0x013d
+#define EXIFTAGID_PREDICTOR CONSTRUCT_TAGID(PREDICTOR, _ID_PREDICTOR)
+#define EXIFTAGTYPE_PREDICTOR EXIF_SHORT
+// White point chromaticity
+// Use EXIFTAGTYPE_WHITE_POINT as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 2
+#define _ID_WHITE_POINT 0x013e
+#define EXIFTAGID_WHITE_POINT CONSTRUCT_TAGID(WHITE_POINT, _ID_WHITE_POINT)
+#define EXIFTAGTYPE_WHITE_POINT EXIF_RATIONAL
+// Chromaticities of primaries
+// Use EXIFTAGTYPE_PRIMARY_CHROMATICITIES as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 6
+#define _ID_PRIMARY_CHROMATICITIES 0x013f
+#define EXIFTAGID_PRIMARY_CHROMATICITIES \
+  CONSTRUCT_TAGID(PRIMARY_CHROMATICITIES, _ID_PRIMARY_CHROMATICITIES)
+#define EXIFTAGTYPE_PRIMARY_CHROMATICITIES EXIF_RATIONAL
+
+// COLOR_MAP
+// Use EXIFTAGTYPE_COLOR_MAP as the exif_tag_type (EXIF_SHORT)
+// Count should be 6
+#define _ID_COLOR_MAP 0x0140
+#define EXIFTAGID_COLOR_MAP CONSTRUCT_TAGID(COLOR_MAP, _ID_COLOR_MAP)
+#define EXIFTAGTYPE_COLOR_MAP EXIF_SHORT
+// HALFTONE_HINTS
+// Use EXIFTAGTYPE_HALFTONE_HINTS as the exif_tag_type (EXIF_SHORT)
+// Count should be 6
+#define _ID_HALFTONE_HINTS 0x0141
+#define EXIFTAGID_HALFTONE_HINTS \
+  CONSTRUCT_TAGID(HALFTONE_HINTS, _ID_HALFTONE_HINTS)
+#define EXIFTAGTYPE_HALFTONE_HINTS EXIF_SHORT
+
+// TILE_WIDTH
+// Use EXIFTAGTYPE_TILE_WIDTH as the exif_tag_type (EXIF_LONG)
+// Count should be 6
+#define _ID_TILE_WIDTH 0x0142
+#define EXIFTAGID_TILE_WIDTH CONSTRUCT_TAGID(TILE_WIDTH, _ID_TILE_WIDTH)
+#define EXIFTAGTYPE_TILE_WIDTH EXIF_LONG
+// TILE_LENGTH
+// Use EXIFTAGTYPE_TILE_LENGTH  as the exif_tag_type (EXIF_LONG)
+// Count should be 6
+#define _ID_TILE_LENGTH 0x0143
+#define EXIFTAGID_TILE_LENGTH CONSTRUCT_TAGID(TILE_LENGTH, _ID_TILE_LENGTH)
+#define EXIFTAGTYPE_TILE_LENGTH EXIF_LONG
+// TILE_OFFSET
+// Use EXIFTAGTYPE_TILE_OFFSET as the exif_tag_type (EXIF_LONG)
+//
+#define _ID_TILE_OFFSET 0x0144
+#define EXIFTAGID_TILE_OFFSET CONSTRUCT_TAGID(TILE_OFFSET, _ID_TILE_OFFSET)
+#define EXIFTAGTYPE_TILE_OFFSET EXIF_LONG
+// Tile Byte Counts
+// Use EXIFTAGTYPE_TILE_BYTE_COUNTS as the exif_tag_type (EXIF_LONG)
+//
+#define _ID_TILE_BYTE_COUNTS 0x0145
+#define EXIFTAGID_TILE_BYTE_COUNTS \
+  CONSTRUCT_TAGID(TILE_BYTE_COUNTS, _ID_TILE_BYTE_COUNTS)
+#define EXIFTAGTYPE_TILE_BYTE_COUNTS EXIF_LONG
+
+// INK_SET
+// Use EXIFTAGTYPE_INK_SET as the exif_tag_type (EXIF_SHORT)
+// Count should be 6
+#define _ID_INK_SET 0x014c
+#define EXIFTAGID_INK_SET CONSTRUCT_TAGID(INK_SET, _ID_INK_SET)
+#define EXIFTAGTYPE_INK_SET EXIF_SHORT
+// INK_NAMES
+// Use EXIFTAGTYPE_INK_NAMES  as the exif_tag_type (EXIF_ASCII)
+// Count should be 6
+#define _ID_INK_NAMES 0x014D
+#define EXIFTAGID_INK_NAMES CONSTRUCT_TAGID(INK_NAMES, _ID_INK_NAMES)
+#define EXIFTAGTYPE_INK_NAMES EXIF_ASCII
+// NUMBER_OF_INKS
+// Use EXIFTAGTYPE_NUMBER_OF_INKS  as the exif_tag_type (EXIF_SHORT)
+// Count should be 6
+#define _ID_NUMBER_OF_INKS 0x014e
+#define EXIFTAGID_NUMBER_OF_INKS \
+  CONSTRUCT_TAGID(NUMBER_OF_INKS, _ID_NUMBER_OF_INKS)
+#define EXIFTAGTYPE_NUMBER_OF_INKS EXIF_SHORT
+
+// DOT_RANGE
+// Use EXIFTAGTYPE_DOT_RANGE  as the exif_tag_type (EXIF_ASCII)
+// Count should be 6
+#define _ID_DOT_RANGE 0x0150
+#define EXIFTAGID_DOT_RANGE CONSTRUCT_TAGID(DOT_RANGE, _ID_DOT_RANGE)
+#define EXIFTAGTYPE_DOT_RANGE EXIF_ASCII
+
+// TARGET_PRINTER
+// Use EXIFTAGTYPE_TARGET_PRINTER  as the exif_tag_type (EXIF_ASCII)
+// Count should be 6
+#define _ID_TARGET_PRINTER 0x0151
+#define EXIFTAGID_TARGET_PRINTER \
+  CONSTRUCT_TAGID(TARGET_PRINTER, _ID_TARGET_PRINTER)
+#define EXIFTAGTYPE_TARGET_PRINTER EXIF_ASCII
+// EXTRA_SAMPLES
+// Use EXIFTAGTYPE_EXTRA_SAMPLES as the exif_tag_type (EXIF_SHORT)
+// Count should be 6
+#define _ID_EXTRA_SAMPLES 0x0152
+#define EXIFTAGID_EXTRA_SAMPLES \
+  CONSTRUCT_TAGID(EXTRA_SAMPLES, _ID_EXTRA_SAMPLES)
+#define EXIFTAGTYPE_EXTRA_SAMPLES EXIF_SHORT
+
+// SAMPLE_FORMAT
+// Use EXIFTAGTYPE_SAMPLE_FORMAT  as the exif_tag_type (EXIF_SHORT)
+// Count should be 6
+#define _ID_SAMPLE_FORMAT 0x0153
+#define EXIFTAGID_SAMPLE_FORMAT \
+  CONSTRUCT_TAGID(SAMPLE_FORMAT, _ID_SAMPLE_FORMAT)
+#define EXIFTAGTYPE_SAMPLE_FORMAT EXIF_SHORT
+
+// Table of values that extends the range of the transfer function.
+// Use EXIFTAGTYPE_TRANSFER_RANGE as the exif_tag_type (EXIF_SHORT)
+// Count should be 6
+#define _ID_TRANSFER_RANGE 0x0156
+#define EXIFTAGID_TRANSFER_RANGE \
+  CONSTRUCT_TAGID(TRANSFER_RANGE, _ID_TRANSFER_RANGE)
+#define EXIFTAGTYPE_TRANSFER_RANGE EXIF_SHORT
+
+// JPEG compression process.
+// Use EXIFTAGTYPE_JPEG_PROC as the exif_tag_type (EXIF_SHORT)
+//
+#define _ID_JPEG_PROC 0x0200
+#define EXIFTAGID_JPEG_PROC CONSTRUCT_TAGID(JPEG_PROC, _ID_JPEG_PROC)
+#define EXIFTAGTYPE_JPEG_PROC EXIF_SHORT
+
+
+// Offset to JPEG SOI
+// Use EXIFTAGTYPE_JPEG_INTERCHANGE_FORMAT as the exif_tag_type (EXIF_LONG)
+// Count is undefined
+#define _ID_JPEG_INTERCHANGE_FORMAT 0x0201
+#define EXIFTAGID_JPEG_INTERCHANGE_FORMAT \
+  CONSTRUCT_TAGID(JPEG_INTERCHANGE_FORMAT, _ID_JPEG_INTERCHANGE_FORMAT)
+#define EXIFTAGTYPE_JPEG_INTERCHANGE_FORMAT EXIF_LONG
+// Bytes of JPEG data
+// Use EXIFTAGTYPE_JPEG_INTERCHANGE_FORMAT_LENGTH as the exif_tag_type (EXIF_LONG)
+// Count is undefined
+#define _ID_JPEG_INTERCHANGE_FORMAT_LENGTH 0x0202
+#define EXIFTAGID_JPEG_INTERCHANGE_FORMAT_LENGTH \
+  CONSTRUCT_TAGID(JPEG_INTERCHANGE_FORMAT_LENGTH, \
+  _ID_JPEG_INTERCHANGE_FORMAT_LENGTH)
+#define EXIFTAGTYPE_JPEG_INTERCHANGE_FORMAT_LENGTH EXIF_LONG
+
+// Length of the restart interval.
+// Use EXIFTAGTYPE_JPEG_RESTART_INTERVAL as the exif_tag_type (EXIF_SHORT)
+// Count is undefined
+#define _ID_JPEG_RESTART_INTERVAL 0x0203
+#define EXIFTAGID_JPEG_RESTART_INTERVAL \
+  CONSTRUCT_TAGID(JPEG_RESTART_INTERVAL, _ID_JPEG_RESTART_INTERVAL)
+#define EXIFTAGTYPE_JPEG_RESTART_INTERVAL EXIF_SHORT
+
+// JPEGLosslessPredictors
+// Use EXIFTAGTYPE_JPEG_LOSSLESS_PREDICTORS as the exif_tag_type (EXIF_SHORT)
+// Count is undefined
+#define _ID_JPEG_LOSSLESS_PREDICTORS 0x0205
+#define EXIFTAGID_JPEG_LOSSLESS_PREDICTORS  \
+  CONSTRUCT_TAGID(JPEG_LOSSLESS_PREDICTORS, _ID_JPEG_LOSSLESS_PREDICTORS)
+#define EXIFTAGTYPE_JPEG_LOSSLESS_PREDICTORS EXIF_SHORT
+
+// JPEGPointTransforms
+// Use EXIFTAGTYPE_JPEG_POINT_TRANSFORMS as the exif_tag_type (EXIF_SHORT)
+// Count is undefined
+#define _ID_JPEG_POINT_TRANSFORMS 0x0206
+#define EXIFTAGID_JPEG_POINT_TRANSFORMS  \
+  CONSTRUCT_TAGID(JPEG_POINT_TRANSFORMS, _ID_JPEG_POINT_TRANSFORMS)
+#define EXIFTAGTYPE_JPEG_POINT_TRANSFORMS EXIF_SHORT
+
+// JPEG_Q_TABLES
+// Use EXIFTAGTYPE_JPEG_Q_TABLES as the exif_tag_type (EXIF_LONG)
+// Count is undefined
+#define _ID_JPEG_Q_TABLES 0x0207
+#define EXIFTAGID_JPEG_Q_TABLES \
+  CONSTRUCT_TAGID(JPEG_Q_TABLES, _ID_JPEG_Q_TABLES)
+#define EXIFTAGTYPE_JPEG_Q_TABLES EXIF_LONG
+// JPEG_DC_TABLES
+// Use EXIFTAGTYPE_JPEG_DC_TABLES as the exif_tag_type (EXIF_LONG)
+// Count is undefined
+#define _ID_JPEG_DC_TABLES 0x0208
+#define EXIFTAGID_JPEG_DC_TABLES \
+  CONSTRUCT_TAGID(JPEG_DC_TABLES, _ID_JPEG_DC_TABLES)
+#define EXIFTAGTYPE_JPEG_DC_TABLES EXIF_LONG
+// JPEG_AC_TABLES
+// Use EXIFTAGTYPE_JPEG_AC_TABLES as the exif_tag_type (EXIF_LONG)
+// Count is undefined
+#define _ID_JPEG_AC_TABLES 0x0209
+#define EXIFTAGID_JPEG_AC_TABLES \
+  CONSTRUCT_TAGID(JPEG_AC_TABLES, _ID_JPEG_AC_TABLES)
+#define EXIFTAGTYPE_JPEG_AC_TABLES EXIF_LONG
+
+// Color space transformation matrix coefficients
+// Use EXIFTAGTYPE_YCBCR_COEFFICIENTS as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 3
+#define _ID_YCBCR_COEFFICIENTS 0x0211
+#define EXIFTAGID_YCBCR_COEFFICIENTS \
+  CONSTRUCT_TAGID(YCBCR_COEFFICIENTS, _ID_YCBCR_COEFFICIENTS)
+#define EXIFTAGTYPE_YCBCR_COEFFICIENTS EXIF_RATIONAL
+// Subsampling ratio of Y to C
+// Use EXIFTAGTYPE_YCBCR_SUB_SAMPLING as the exif_tag_type (EXIF_SHORT)
+// Count should be 2
+#define _ID_YCBCR_SUB_SAMPLING 0x0212
+#define EXIFTAGID_YCBCR_SUB_SAMPLING  \
+  CONSTRUCT_TAGID(YCBCR_SUB_SAMPLING, _ID_YCBCR_SUB_SAMPLING)
+#define EXIFTAGTYPE_YCBCR_SUB_SAMPLING EXIF_SHORT
+// Y and C positioning
+// Use EXIFTAGTYPE_YCBCR_POSITIONING as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_YCBCR_POSITIONING 0x0213
+#define EXIFTAGID_YCBCR_POSITIONING  \
+  CONSTRUCT_TAGID(YCBCR_POSITIONING, _ID_YCBCR_POSITIONING)
+#define EXIFTAGTYPE_YCBCR_POSITIONING EXIF_SHORT
+// Pair of black and white reference values
+// Use EXIFTAGTYPE_REFERENCE_BLACK_WHITE as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 6
+#define _ID_REFERENCE_BLACK_WHITE 0x0214
+#define EXIFTAGID_REFERENCE_BLACK_WHITE \
+  CONSTRUCT_TAGID(REFERENCE_BLACK_WHITE, _ID_REFERENCE_BLACK_WHITE)
+#define EXIFTAGTYPE_REFERENCE_BLACK_WHITE EXIF_RATIONAL
+// GAMMA
+// Use EXIFTAGTYPE_GAMMA as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 6
+#define _ID_GAMMA 0x0301
+#define EXIFTAGID_GAMMA CONSTRUCT_TAGID(GAMMA, _ID_GAMMA)
+#define EXIFTAGTYPE_GAMMA EXIF_RATIONAL
+// Null-terminated character string that identifies an ICC profile.
+// Use EXIFTAGTYPE_ICC_PROFILE_DESCRIPTOR as the exif_tag_type (EXIF_ASCII)
+// Count should be 6
+#define _ID_ICC_PROFILE_DESCRIPTOR 0x0302
+#define EXIFTAGID_ICC_PROFILE_DESCRIPTOR \
+  CONSTRUCT_TAGID(ICC_PROFILE_DESCRIPTOR, _ID_ICC_PROFILE_DESCRIPTOR)
+#define EXIFTAGTYPE_ICC_PROFILE_DESCRIPTOR EXIF_ASCII
+// SRGB_RENDERING_INTENT
+// Use EXIFTAGTYPE_SRGB_RENDERING_INTENT as the exif_tag_type (EXIF_BYTE)
+// Count should be 6
+#define _ID_SRGB_RENDERING_INTENT 0x0303
+#define EXIFTAGID_SRGB_RENDERING_INTENT \
+  CONSTRUCT_TAGID(SRGB_RENDERING_INTENT, _ID_SRGB_RENDERING_INTENT)
+#define EXIFTAGTYPE_SRGB_RENDERING_INTENT EXIF_BYTE
+
+// Null-terminated character string that specifies the title of the image.
+// Use EXIFTAGTYPE_IMAGE_TITLE as the exif_tag_type (EXIF_ASCII)
+//
+#define _ID_IMAGE_TITLE 0x0320
+#define EXIFTAGID_IMAGE_TITLE CONSTRUCT_TAGID(IMAGE_TITLE, _ID_IMAGE_TITLE)
+#define EXIFTAGTYPE_IMAGE_TITLE EXIF_ASCII
+
+// Copyright holder
+// Use EXIFTAGTYPE_COPYRIGHT as the exif_tag_type (EXIF_ASCII)
+// Count can be any
+#define _ID_COPYRIGHT 0x8298
+#define EXIFTAGID_COPYRIGHT CONSTRUCT_TAGID(COPYRIGHT, _ID_COPYRIGHT)
+#define EXIFTAGTYPE_COPYRIGHT EXIF_ASCII
+// New Subfile Type
+// Use EXIFTAGTYPE_NEW_SUBFILE_TYPE as the exif_tag_type (EXIF_SHORT)
+// Count can be any
+#define _ID_NEW_SUBFILE_TYPE 0x00fe
+#define EXIFTAGID_NEW_SUBFILE_TYPE \
+  CONSTRUCT_TAGID(NEW_SUBFILE_TYPE, _ID_NEW_SUBFILE_TYPE)
+#define EXIFTAGTYPE_NEW_SUBFILE_TYPE EXIF_SHORT
+
+// Subfile Type (old-style subfile tag)
+// Use EXIFTAGTYPE_SUBFILE_TYPE as the exif_tag_type (EXIF_LONG)
+// Count can be any
+#define _ID_SUBFILE_TYPE 0x00ff
+#define EXIFTAGID_SUBFILE_TYPE CONSTRUCT_TAGID(SUBFILE_TYPE, _ID_SUBFILE_TYPE)
+#define EXIFTAGTYPE_SUBFILE_TYPE EXIF_LONG
+
+// Image width (of thumbnail)
+// Use EXIFTAGTYPE_TN_IMAGE_WIDTH as the exif_tag_type (EXIF_LONG)
+// Count should be 1
+#define _ID_TN_IMAGE_WIDTH 0x0100
+#define EXIFTAGID_TN_IMAGE_WIDTH \
+  CONSTRUCT_TAGID(TN_IMAGE_WIDTH, _ID_TN_IMAGE_WIDTH)
+#define EXIFTAGTYPE_TN_IMAGE_WIDTH EXIF_LONG
+// Image height (of thumbnail)
+// Use EXIFTAGTYPE_TN_IMAGE_LENGTH as the exif_tag_type (EXIF_LONG)
+// Count should be 1
+#define _ID_TN_IMAGE_LENGTH 0x0101
+#define EXIFTAGID_TN_IMAGE_LENGTH \
+  CONSTRUCT_TAGID(TN_IMAGE_LENGTH, _ID_TN_IMAGE_LENGTH)
+#define EXIFTAGTYPE_TN_IMAGE_LENGTH EXIF_LONG
+// Number of bits per component (of thumbnail)
+// Use EXIFTAGTYPE_TN_BITS_PER_SAMPLE as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_TN_BITS_PER_SAMPLE 0x0102
+#define EXIFTAGID_TN_BITS_PER_SAMPLE \
+  CONSTRUCT_TAGID(TN_BITS_PER_SAMPLE, _ID_TN_BITS_PER_SAMPLE)
+#define EXIFTAGTYPE_TN_BITS_PER_SAMPLE EXIF_SHORT
+// Compression scheme (of thumbnail)
+// Use EXIFTAGTYPE_TN_COMPRESSION as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_TN_COMPRESSION 0x0103
+#define EXIFTAGID_TN_COMPRESSION \
+  CONSTRUCT_TAGID(TN_COMPRESSION, _ID_TN_COMPRESSION)
+#define EXIFTAGTYPE_TN_COMPRESSION EXIF_SHORT
+// Pixel composition (of thumbnail)
+// Use EXIFTAGTYPE_TN_PHOTOMETRIC_INTERPRETATION as the
+// exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_TN_PHOTOMETRIC_INTERPRETATION 0x0106
+#define EXIFTAGID_TN_PHOTOMETRIC_INTERPRETATION \
+  CONSTRUCT_TAGID(TN_PHOTOMETRIC_INTERPRETATION, \
+  _ID_TN_PHOTOMETRIC_INTERPRETATION)
+#define EXIFTAGTYPE_TN_PHOTOMETRIC_INTERPRETATION EXIF_SHORT
+// Image title (of thumbnail)
+// Use EXIFTAGTYPE_TN_IMAGE_DESCRIPTION as the exif_tag_type (EXIF_ASCII)
+// Count can be any
+#define _ID_TN_IMAGE_DESCRIPTION 0x010e
+#define EXIFTAGID_TN_IMAGE_DESCRIPTION \
+  CONSTRUCT_TAGID(TN_IMAGE_DESCRIPTION, _ID_TN_IMAGE_DESCRIPTION)
+#define EXIFTAGTYPE_TN_IMAGE_DESCRIPTION EXIF_ASCII
+// Image input equipment manufacturer (of thumbnail)
+// Use EXIFTAGTYPE_TN_MAKE as the exif_tag_type (EXIF_ASCII)
+// Count can be any
+#define _ID_TN_MAKE 0x010f
+#define EXIFTAGID_TN_MAKE CONSTRUCT_TAGID(TN_MAKE, _ID_TN_MAKE)
+#define EXIFTAGTYPE_TN_MAKE EXIF_ASCII
+// Image input equipment model (of thumbnail)
+// Use EXIFTAGTYPE_TN_MODEL as the exif_tag_type (EXIF_ASCII)
+// Count can be any
+#define _ID_TN_MODEL 0x0110
+#define EXIFTAGID_TN_MODEL CONSTRUCT_TAGID(TN_MODEL, _ID_TN_MODEL)
+#define EXIFTAGTYPE_TN_MODEL EXIF_ASCII
+// Image data location (of thumbnail)
+// Use EXIFTAGTYPE_TN_STRIP_OFFSETS as the exif_tag_type (EXIF_LONG)
+// Count = StripsPerImage                    when PlanarConfiguration = 1
+//       = SamplesPerPixel * StripsPerImage  when PlanarConfiguration = 2
+#define _ID_TN_STRIP_OFFSETS 0x0111
+#define EXIFTAGID_TN_STRIP_OFFSETS \
+  CONSTRUCT_TAGID(STRIP_TN_OFFSETS, _ID_TN_STRIP_OFFSETS)
+#define EXIFTAGTYPE_TN_STRIP_OFFSETS EXIF_LONG
+// Orientation of image (of thumbnail)
+// Use EXIFTAGTYPE_TN_ORIENTATION as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_TN_ORIENTATION 0x0112
+#define EXIFTAGID_TN_ORIENTATION \
+  CONSTRUCT_TAGID(TN_ORIENTATION, _ID_TN_ORIENTATION)
+#define EXIFTAGTYPE_TN_ORIENTATION EXIF_SHORT
+// Number of components (of thumbnail)
+// Use EXIFTAGTYPE_TN_SAMPLES_PER_PIXEL as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_TN_SAMPLES_PER_PIXEL 0x0115
+#define EXIFTAGID_TN_SAMPLES_PER_PIXEL \
+  CONSTRUCT_TAGID(TN_SAMPLES_PER_PIXEL, _ID_TN_SAMPLES_PER_PIXEL)
+#define EXIFTAGTYPE_TN_SAMPLES_PER_PIXEL EXIF_SHORT
+// Number of rows per strip (of thumbnail)
+// Use EXIFTAGTYPE_TN_ROWS_PER_STRIP as the exif_tag_type (EXIF_LONG)
+// Count should be 1
+#define _ID_TN_ROWS_PER_STRIP 0x0116
+#define EXIFTAGID_TN_ROWS_PER_STRIP \
+  CONSTRUCT_TAGID(TN_ROWS_PER_STRIP, _ID_TN_ROWS_PER_STRIP)
+#define EXIFTAGTYPE_TN_ROWS_PER_STRIP EXIF_LONG
+// Bytes per compressed strip (of thumbnail)
+// Use EXIFTAGTYPE_TN_STRIP_BYTE_COUNTS as the exif_tag_type (EXIF_LONG)
+// Count = StripsPerImage                    when PlanarConfiguration = 1
+//       = SamplesPerPixel * StripsPerImage  when PlanarConfiguration = 2
+#define _ID_TN_STRIP_BYTE_COUNTS 0x0117
+#define EXIFTAGID_TN_STRIP_BYTE_COUNTS \
+  CONSTRUCT_TAGID(TN_STRIP_BYTE_COUNTS, _ID_TN_STRIP_BYTE_COUNTS)
+#define EXIFTAGTYPE_TN_STRIP_BYTE_COUNTS EXIF_LONG
+// Image resolution in width direction (of thumbnail)
+// Use EXIFTAGTYPE_TN_X_RESOLUTION as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_TN_X_RESOLUTION 0x011a
+#define EXIFTAGID_TN_X_RESOLUTION \
+  CONSTRUCT_TAGID(TN_X_RESOLUTION, _ID_TN_X_RESOLUTION)
+#define EXIFTAGTYPE_TN_X_RESOLUTION EXIF_RATIONAL
+// Image resolution in height direction  (of thumbnail)
+// Use EXIFTAGTYPE_TN_Y_RESOLUTION as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_TN_Y_RESOLUTION 0x011b
+#define EXIFTAGID_TN_Y_RESOLUTION \
+  CONSTRUCT_TAGID(TN_Y_RESOLUTION, _ID_TN_Y_RESOLUTION)
+#define EXIFTAGTYPE_TN_Y_RESOLUTION EXIF_RATIONAL
+// Image data arrangement (of thumbnail)
+// Use EXIFTAGTYPE_TN_PLANAR_CONFIGURATION as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_TN_PLANAR_CONFIGURATION 0x011c
+#define EXIFTAGID_TN_PLANAR_CONFIGURATION \
+  CONSTRUCT_TAGID(TN_PLANAR_CONFIGURATION, _ID_TN_PLANAR_CONFIGURATION)
+#define EXIFTAGTYPE_TN_PLANAR_CONFIGURATION EXIF_SHORT
+// Unit of X and Y resolution (of thumbnail)
+// Use EXIFTAGTYPE_TN_RESOLUTION_UNIT as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_TN_RESOLUTION_UNIT 0x0128
+#define EXIFTAGID_TN_RESOLUTION_UNIT \
+  CONSTRUCT_TAGID(TN_RESOLUTION_UNIT, _ID_TN_RESOLUTION_UNIT)
+#define EXIFTAGTYPE_TN_RESOLUTION_UNIT EXIF_SHORT
+// Transfer function (of thumbnail)
+// Use EXIFTAGTYPE_TN_TRANSFER_FUNCTION as the exif_tag_type (EXIF_SHORT)
+// Count should be 3*256
+#define _ID_TN_TRANSFER_FUNCTION 0x012d
+#define EXIFTAGID_TN_TRANSFER_FUNCTION \
+  CONSTRUCT_TAGID(TN_TRANSFER_FUNCTION, _ID_TN_TRANSFER_FUNCTION)
+#define EXIFTAGTYPE_TN_TRANSFER_FUNCTION EXIF_SHORT
+// Software used (of thumbnail)
+// Use EXIFTAGTYPE_TN_SOFTWARE as the exif_tag_type (EXIF_ASCII)
+// Count can be any
+#define _ID_TN_SOFTWARE 0x0131
+#define EXIFTAGID_TN_SOFTWARE CONSTRUCT_TAGID(TN_SOFTWARE, _ID_TN_SOFTWARE)
+#define EXIFTAGTYPE_TN_SOFTWARE EXIF_ASCII
+// File change date and time (of thumbnail)
+// Use EXIFTAGTYPE_TN_DATE_TIME as the exif_tag_type (EXIF_ASCII)
+// Count should be 20
+#define _ID_TN_DATE_TIME 0x0132
+#define EXIFTAGID_TN_DATE_TIME CONSTRUCT_TAGID(TN_DATE_TIME, _ID_TN_DATE_TIME)
+#define EXIFTAGTYPE_TN_DATE_TIME EXIF_ASCII
+// ARTIST, person who created this image (of thumbnail)
+// Use EXIFTAGTYPE_TN_ARTIST as the exif_tag_type (EXIF_ASCII)
+// Count can be any
+#define _ID_TN_ARTIST 0x013b
+#define EXIFTAGID_TN_ARTIST CONSTRUCT_TAGID(TN_ARTIST, _ID_TN_ARTIST)
+#define EXIFTAGTYPE_TN_ARTIST EXIF_ASCII
+// White point chromaticity (of thumbnail)
+// Use EXIFTAGTYPE_TN_WHITE_POINT as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 2
+#define _ID_TN_WHITE_POINT 0x013e
+#define EXIFTAGID_TN_WHITE_POINT \
+  CONSTRUCT_TAGID(TN_WHITE_POINT, _ID_TN_WHITE_POINT)
+#define EXIFTAGTYPE_TN_WHITE_POINT EXIF_RATIONAL
+// Chromaticities of primaries (of thumbnail)
+// Use EXIFTAGTYPE_TN_PRIMARY_CHROMATICITIES as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 6
+#define _ID_TN_PRIMARY_CHROMATICITIES 0x013f
+#define EXIFTAGID_TN_PRIMARY_CHROMATICITIES \
+  CONSTRUCT_TAGID(TN_PRIMARY_CHROMATICITIES, _ID_TN_PRIMARY_CHROMATICITIES)
+#define EXIFTAGTYPE_TN_PRIMARY_CHROMATICITIES EXIF_RATIONAL
+// Offset to JPEG SOI (of thumbnail)
+// Use EXIFTAGTYPE_TN_JPEGINTERCHANGE_FORMAT as the exif_tag_type (EXIF_LONG)
+// Count is undefined
+#define _ID_TN_JPEGINTERCHANGE_FORMAT 0x0201
+#define EXIFTAGID_TN_JPEGINTERCHANGE_FORMAT \
+  CONSTRUCT_TAGID(TN_JPEGINTERCHANGE_FORMAT, _ID_TN_JPEGINTERCHANGE_FORMAT)
+#define EXIFTAGTYPE_TN_JPEGINTERCHANGE_FORMAT EXIF_LONG
+// Bytes of JPEG data (of thumbnail)
+// Use EXIFTAGTYPE_TN_JPEGINTERCHANGE_FORMAT_L as the exif_tag_type (EXIF_LONG)
+// Count is undefined
+#define _ID_TN_JPEGINTERCHANGE_FORMAT_L 0x0202
+#define EXIFTAGID_TN_JPEGINTERCHANGE_FORMAT_L \
+  CONSTRUCT_TAGID(TN_JPEGINTERCHANGE_FORMAT_L, _ID_TN_JPEGINTERCHANGE_FORMAT_L)
+#define EXIFTAGTYPE_TN_JPEGINTERCHANGE_FORMAT_L EXIF_LONG
+// Color space transformation matrix coefficients (of thumbnail)
+// Use EXIFTAGTYPE_TN_YCBCR_COEFFICIENTS as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 3
+#define _ID_TN_YCBCR_COEFFICIENTS 0x0211
+#define EXIFTAGID_TN_YCBCR_COEFFICIENTS \
+  CONSTRUCT_TAGID(TN_YCBCR_COEFFICIENTS, _ID_TN_YCBCR_COEFFICIENTS)
+#define EXIFTAGTYPE_TN_YCBCR_COEFFICIENTS EXIF_RATIONAL
+// Subsampling ratio of Y to C (of thumbnail)
+// Use EXIFTAGTYPE_TN_YCBCR_SUB_SAMPLING as the exif_tag_type (EXIF_SHORT)
+// Count should be 2
+#define _ID_TN_YCBCR_SUB_SAMPLING 0x0212
+#define EXIFTAGID_TN_YCBCR_SUB_SAMPLING \
+  CONSTRUCT_TAGID(TN_YCBCR_SUB_SAMPLING, _ID_TN_YCBCR_SUB_SAMPLING)
+#define EXIFTAGTYPE_TN_YCBCR_SUB_SAMPLING EXIF_SHORT
+// Y and C positioning (of thumbnail)
+// Use EXIFTAGTYPE_TN_YCBCR_POSITIONING as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_TN_YCBCR_POSITIONING 0x0213
+#define EXIFTAGID_TN_YCBCR_POSITIONING \
+  CONSTRUCT_TAGID(TN_YCBCR_POSITIONING, _ID_TN_YCBCR_POSITIONING)
+#define EXIFTAGTYPE_TN_YCBCR_POSITIONING    EXIF_SHORT
+// Pair of black and white reference values (of thumbnail)
+// Use EXIFTAGTYPE_TN_REFERENCE_BLACK_WHITE as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 6
+#define _ID_TN_REFERENCE_BLACK_WHITE 0x0214
+#define EXIFTAGID_TN_REFERENCE_BLACK_WHITE \
+  CONSTRUCT_TAGID(TN_REFERENCE_BLACK_WHITE, _ID_TN_REFERENCE_BLACK_WHITE)
+#define EXIFTAGTYPE_TN_REFERENCE_BLACK_WHITE EXIF_RATIONAL
+// Copyright holder (of thumbnail)
+// Use EXIFTAGTYPE_TN_COPYRIGHT as the exif_tag_type (EXIF_ASCII)
+// Count can be any
+#define _ID_TN_COPYRIGHT 0x8298
+#define EXIFTAGID_TN_COPYRIGHT CONSTRUCT_TAGID(TN_COPYRIGHT, _ID_TN_COPYRIGHT)
+#define EXIFTAGTYPE_TN_COPYRIGHT EXIF_ASCII
+// Exposure time
+// Use EXIFTAGTYPE_EXPOSURE_TIME as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_EXPOSURE_TIME 0x829a
+#define EXIFTAGID_EXPOSURE_TIME \
+  CONSTRUCT_TAGID(EXPOSURE_TIME, _ID_EXPOSURE_TIME)
+#define EXIFTAGTYPE_EXPOSURE_TIME EXIF_RATIONAL
+// F number
+// Use EXIFTAGTYPE_F_NUMBER as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_F_NUMBER 0x829d
+#define EXIFTAGID_F_NUMBER \
+  CONSTRUCT_TAGID(F_NUMBER, _ID_F_NUMBER)
+#define EXIFTAGTYPE_F_NUMBER EXIF_RATIONAL
+// Exif IFD pointer (NOT INTENDED to be accessible to user)
+#define _ID_EXIF_IFD_PTR 0x8769
+#define EXIFTAGID_EXIF_IFD_PTR \
+  CONSTRUCT_TAGID(EXIF_IFD, _ID_EXIF_IFD_PTR)
+#define EXIFTAGTYPE_EXIF_IFD_PTR EXIF_LONG
+
+// ICC_PROFILE (NOT INTENDED to be accessible to user)
+#define _ID_ICC_PROFILE 0x8773
+#define EXIFTAGID_ICC_PROFILE CONSTRUCT_TAGID(ICC_PROFILE, _ID_ICC_PROFILE)
+#define EXIFTAGTYPE_ICC_PROFILE EXIF_LONG
+// Exposure program
+// Use EXIFTAGTYPE_EXPOSURE_PROGRAM as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_EXPOSURE_PROGRAM 0x8822
+#define EXIFTAGID_EXPOSURE_PROGRAM \
+  CONSTRUCT_TAGID(EXPOSURE_PROGRAM, _ID_EXPOSURE_PROGRAM)
+#define EXIFTAGTYPE_EXPOSURE_PROGRAM EXIF_SHORT
+// Spectral sensitivity
+// Use EXIFTAGTYPE_SPECTRAL_SENSITIVITY as the exif_tag_type (EXIF_ASCII)
+// Count can be any
+#define _ID_SPECTRAL_SENSITIVITY 0x8824
+#define EXIFTAGID_SPECTRAL_SENSITIVITY \
+  CONSTRUCT_TAGID(SPECTRAL_SENSITIVITY, _ID_SPECTRAL_SENSITIVITY)
+#define EXIFTAGTYPE_SPECTRAL_SENSITIVITY EXIF_ASCII
+// GPS IFD pointer (NOT INTENDED to be accessible to user)
+#define _ID_GPS_IFD_PTR 0x8825
+#define EXIFTAGID_GPS_IFD_PTR \
+  CONSTRUCT_TAGID(GPS_IFD, _ID_GPS_IFD_PTR)
+#define EXIFTAGTYPE_GPS_IFD_PTR EXIF_LONG
+// ISO Speed Rating
+// Use EXIFTAGTYPE_ISO_SPEED_RATING as the exif_tag_type (EXIF_SHORT)
+// Count can be any
+#define _ID_ISO_SPEED_RATING 0x8827
+#define EXIFTAGID_ISO_SPEED_RATING \
+  CONSTRUCT_TAGID(ISO_SPEED_RATING, _ID_ISO_SPEED_RATING)
+#define EXIFTAGTYPE_ISO_SPEED_RATING EXIF_SHORT
+// Optoelectric conversion factor
+// Use EXIFTAGTYPE_OECF as the exif_tag_type (EXIF_UNDEFINED)
+// Count can be any
+#define _ID_OECF 0x8828
+#define EXIFTAGID_OECF CONSTRUCT_TAGID(OECF, _ID_OECF)
+#define EXIFTAGTYPE_OECF EXIF_UNDEFINED
+// Exif version
+// Use EXIFTAGTYPE_EXIF_VERSION as the exif_tag_type (EXIF_UNDEFINED)
+// Count should be 4
+#define _ID_EXIF_VERSION 0x9000
+#define EXIFTAGID_EXIF_VERSION \
+  CONSTRUCT_TAGID(EXIF_VERSION, _ID_EXIF_VERSION)
+#define EXIFTAGTYPE_EXIF_VERSION EXIF_UNDEFINED
+// Date and time of original data generation
+// Use EXIFTAGTYPE_EXIF_DATE_TIME_ORIGINAL as the exif_tag_type (EXIF_ASCII)
+// It should be 20 characters long including the null-terminating character.
+#define _ID_EXIF_DATE_TIME_ORIGINAL 0x9003
+#define EXIFTAGID_EXIF_DATE_TIME_ORIGINAL \
+  CONSTRUCT_TAGID(EXIF_DATE_TIME_ORIGINAL, _ID_EXIF_DATE_TIME_ORIGINAL)
+#define EXIFTAGTYPE_EXIF_DATE_TIME_ORIGINAL EXIF_ASCII
+// Date and time of digital data generation
+// Use EXIFTAGTYPE_EXIF_DATE_TIME_DIGITIZED as the exif_tag_type (EXIF_ASCII)
+// It should be 20 characters long including the null-terminating character.
+#define _ID_EXIF_DATE_TIME_DIGITIZED 0x9004
+#define EXIFTAGID_EXIF_DATE_TIME_DIGITIZED \
+  CONSTRUCT_TAGID(EXIF_DATE_TIME_DIGITIZED, _ID_EXIF_DATE_TIME_DIGITIZED)
+#define EXIFTAGTYPE_EXIF_DATE_TIME_DIGITIZED EXIF_ASCII
+// Meaning of each component
+// Use EXIFTAGTYPE_EXIF_COMPONENTS_CONFIG as the exif_tag_type (EXIF_UNDEFINED)
+// Count should be 4
+#define _ID_EXIF_COMPONENTS_CONFIG 0x9101
+#define EXIFTAGID_EXIF_COMPONENTS_CONFIG \
+  CONSTRUCT_TAGID(EXIF_COMPONENTS_CONFIG, _ID_EXIF_COMPONENTS_CONFIG)
+#define EXIFTAGTYPE_EXIF_COMPONENTS_CONFIG EXIF_UNDEFINED
+// Meaning of Image compression mode
+// Use EXIFTAGTYPE_EXIF_COMPRESSED_BITS_PER_PIXEL as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_EXIF_COMPRESSED_BITS_PER_PIXEL 0x9102
+#define EXIFTAGID_EXIF_COMPRESSED_BITS_PER_PIXEL \
+  CONSTRUCT_TAGID(EXIF_COMPRESSED_BITS_PER_PIXEL, _ID_EXIF_COMPRESSED_BITS_PER_PIXEL)
+#define EXIFTAGTYPE_EXIF_COMPRESSED_BITS_PER_PIXEL EXIF_RATIONAL
+// Shutter speed
+// Use EXIFTAGTYPE_SHUTTER_SPEED as the exif_tag_type (EXIF_SRATIONAL)
+// Count should be 1
+#define _ID_SHUTTER_SPEED 0x9201
+#define EXIFTAGID_SHUTTER_SPEED \
+  CONSTRUCT_TAGID(SHUTTER_SPEED, _ID_SHUTTER_SPEED)
+#define EXIFTAGTYPE_SHUTTER_SPEED EXIF_SRATIONAL
+// Aperture
+// Use EXIFTAGTYPE_APERTURE as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_APERTURE 0x9202
+#define EXIFTAGID_APERTURE CONSTRUCT_TAGID(APERTURE, _ID_APERTURE)
+#define EXIFTAGTYPE_APERTURE EXIF_RATIONAL
+// Brightness
+// Use EXIFTAGTYPE_BRIGHTNESS as the exif_tag_type (EXIF_SRATIONAL)
+// Count should be 1
+#define _ID_BRIGHTNESS 0x9203
+#define EXIFTAGID_BRIGHTNESS CONSTRUCT_TAGID(BRIGHTNESS, _ID_BRIGHTNESS)
+#define EXIFTAGTYPE_BRIGHTNESS EXIF_SRATIONAL
+// Exposure bias
+// Use EXIFTAGTYPE_EXPOSURE_BIAS_VALUE as the exif_tag_type (EXIF_SRATIONAL)
+// Count should be 1
+#define _ID_EXPOSURE_BIAS_VALUE 0x9204
+#define EXIFTAGID_EXPOSURE_BIAS_VALUE \
+  CONSTRUCT_TAGID(EXPOSURE_BIAS_VALUE, _ID_EXPOSURE_BIAS_VALUE)
+#define EXIFTAGTYPE_EXPOSURE_BIAS_VALUE EXIF_SRATIONAL
+// Maximum lens aperture
+// Use EXIFTAGTYPE_MAX_APERTURE as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_MAX_APERTURE 0x9205
+#define EXIFTAGID_MAX_APERTURE CONSTRUCT_TAGID(MAX_APERTURE, _ID_MAX_APERTURE)
+#define EXIFTAGTYPE_MAX_APERTURE EXIF_RATIONAL
+// Subject distance
+// Use EXIFTAGTYPE_SUBJECT_DISTANCE as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_SUBJECT_DISTANCE 0x9206
+#define EXIFTAGID_SUBJECT_DISTANCE \
+  CONSTRUCT_TAGID(SUBJECT_DISTANCE, _ID_SUBJECT_DISTANCE)
+#define EXIFTAGTYPE_SUBJECT_DISTANCE EXIF_RATIONAL
+// Metering mode
+// Use EXIFTAGTYPE_METERING_MODE as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_METERING_MODE 0x9207
+#define EXIFTAGID_METERING_MODE \
+  CONSTRUCT_TAGID(METERING_MODE, _ID_METERING_MODE)
+#define EXIFTAGTYPE_METERING_MODE EXIF_SHORT
+// Light source
+// Use EXIFTAGTYPE_LIGHT_SOURCE as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_LIGHT_SOURCE 0x9208
+#define EXIFTAGID_LIGHT_SOURCE CONSTRUCT_TAGID(LIGHT_SOURCE, _ID_LIGHT_SOURCE)
+#define EXIFTAGTYPE_LIGHT_SOURCE EXIF_SHORT
+// Flash
+// Use EXIFTAGTYPE_FLASH as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_FLASH 0x9209
+#define EXIFTAGID_FLASH CONSTRUCT_TAGID(FLASH, _ID_FLASH)
+#define EXIFTAGTYPE_FLASH EXIF_SHORT
+// Lens focal length
+// Use EXIFTAGTYPE_FOCAL_LENGTH as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_FOCAL_LENGTH 0x920a
+#define EXIFTAGID_FOCAL_LENGTH CONSTRUCT_TAGID(FOCAL_LENGTH, _ID_FOCAL_LENGTH)
+#define EXIFTAGTYPE_FOCAL_LENGTH EXIF_RATIONAL
+// Subject area
+// Use EXIFTAGTYPE_SUBJECT_AREA as exif_tag_type (EXIF_SHORT)
+// Count should be 2 or 3 or 4
+#define _ID_SUBJECT_AREA 0x9214
+#define EXIFTAGID_SUBJECT_AREA CONSTRUCT_TAGID(SUBJECT_AREA, _ID_SUBJECT_AREA)
+#define EXIFTAGTYPE_SUBJECT_AREA EXIF_SHORT
+// Maker note
+// Use EXIFTAGTYPE_EXIF_MAKER_NOTE as the exif_tag_type (EXIF_UNDEFINED)
+// Count can be any
+#define _ID_EXIF_MAKER_NOTE 0x927c
+#define EXIFTAGID_EXIF_MAKER_NOTE \
+  CONSTRUCT_TAGID(EXIF_MAKER_NOTE, _ID_EXIF_MAKER_NOTE)
+#define EXIFTAGTYPE_EXIF_MAKER_NOTE EXIF_UNDEFINED
+// User comments
+// Use EXIFTAGTYPE_EXIF_USER_COMMENT as the exif_tag_type (EXIF_UNDEFINED)
+// Count can be any
+#define _ID_EXIF_USER_COMMENT 0x9286
+#define EXIFTAGID_EXIF_USER_COMMENT \
+  CONSTRUCT_TAGID(EXIF_USER_COMMENT, _ID_EXIF_USER_COMMENT)
+#define EXIFTAGTYPE_EXIF_USER_COMMENT EXIF_UNDEFINED
+// Date time sub-seconds
+// Use EXIFTAGTYPE_SUBSEC_TIME as the exif_tag_type (EXIF_ASCII)
+// Count could be any
+#define _ID_SUBSEC_TIME 0x9290
+#define EXIFTAGID_SUBSEC_TIME CONSTRUCT_TAGID(SUBSEC_TIME, _ID_SUBSEC_TIME)
+#define EXIFTAGTYPE_SUBSEC_TIME EXIF_ASCII
+// Date time original sub-seconds
+// use EXIFTAGTYPE_SUBSEC_TIME_ORIGINAL as the exif_tag_type (EXIF_ASCII)
+// Count could be any
+#define _ID_SUBSEC_TIME_ORIGINAL 0x9291
+#define EXIFTAGID_SUBSEC_TIME_ORIGINAL \
+  CONSTRUCT_TAGID(SUBSEC_TIME_ORIGINAL, _ID_SUBSEC_TIME_ORIGINAL)
+#define EXIFTAGTYPE_SUBSEC_TIME_ORIGINAL EXIF_ASCII
+// Date time digitized sub-seconds
+// use EXIFTAGTYPE_SUBSEC_TIME_DIGITIZED as the exif_tag_type (EXIF_ASCII)
+// Count could be any
+#define _ID_SUBSEC_TIME_DIGITIZED 0x9292
+#define EXIFTAGID_SUBSEC_TIME_DIGITIZED \
+  CONSTRUCT_TAGID(SUBSEC_TIME_DIGITIZED, _ID_SUBSEC_TIME_DIGITIZED)
+#define EXIFTAGTYPE_SUBSEC_TIME_DIGITIZED EXIF_ASCII
+// Supported Flashpix version
+// Use EXIFTAGTYPE_EXIF_FLASHPIX_VERSION as the exif_tag_type (EXIF_UNDEFINED)
+// Count should be 4
+#define _ID_EXIF_FLASHPIX_VERSION 0xa000
+#define EXIFTAGID_EXIF_FLASHPIX_VERSION \
+  CONSTRUCT_TAGID(EXIF_FLASHPIX_VERSION, _ID_EXIF_FLASHPIX_VERSION)
+#define EXIFTAGTYPE_EXIF_FLASHPIX_VERSION EXIF_UNDEFINED
+//  Color space information
+// Use EXIFTAGTYPE_EXIF_COLOR_SPACE as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_EXIF_COLOR_SPACE 0xa001
+#define EXIFTAGID_EXIF_COLOR_SPACE \
+  CONSTRUCT_TAGID(EXIF_COLOR_SPACE, _ID_EXIF_COLOR_SPACE)
+#define EXIFTAGTYPE_EXIF_COLOR_SPACE EXIF_SHORT
+//  Valid image width
+// Use EXIFTAGTYPE_EXIF_PIXEL_X_DIMENSION as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_EXIF_PIXEL_X_DIMENSION 0xa002
+#define EXIFTAGID_EXIF_PIXEL_X_DIMENSION \
+  CONSTRUCT_TAGID(EXIF_PIXEL_X_DIMENSION, _ID_EXIF_PIXEL_X_DIMENSION)
+#define EXIFTAGTYPE_EXIF_PIXEL_X_DIMENSION EXIF_SHORT
+// Valid image height
+// Use EXIFTAGTYPE_EXIF_PIXEL_Y_DIMENSION as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_EXIF_PIXEL_Y_DIMENSION 0xa003
+#define EXIFTAGID_EXIF_PIXEL_Y_DIMENSION \
+  CONSTRUCT_TAGID(EXIF_PIXEL_Y_DIMENSION, _ID_EXIF_PIXEL_Y_DIMENSION)
+#define EXIFTAGTYPE_EXIF_PIXEL_Y_DIMENSION  EXIF_SHORT
+// Related audio file
+// Use EXIFTAGTYPE_RELATED_SOUND_FILE as the exif_tag_type (EXIF_ASCII)
+// Count should be 13
+#define _ID_RELATED_SOUND_FILE 0xa004
+#define EXIFTAGID_RELATED_SOUND_FILE \
+  CONSTRUCT_TAGID(RELATED_SOUND_FILE, _ID_RELATED_SOUND_FILE)
+#define EXIFTAGTYPE_RELATED_SOUND_FILE EXIF_ASCII
+// Interop IFD pointer (NOT INTENDED to be accessible to user)
+#define _ID_INTEROP_IFD_PTR 0xa005
+#define EXIFTAGID_INTEROP_IFD_PTR CONSTRUCT_TAGID(INTEROP, _ID_INTEROP_IFD_PTR)
+#define EXIFTAGTYPE_INTEROP_IFD_PTR EXIF_LONG
+// Flash energy
+// Use EXIFTAGTYPE_FLASH_ENERGY as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_FLASH_ENERGY 0xa20b
+#define EXIFTAGID_FLASH_ENERGY CONSTRUCT_TAGID(FLASH_ENERGY, _ID_FLASH_ENERGY)
+#define EXIFTAGTYPE_FLASH_ENERGY EXIF_RATIONAL
+// Spatial frequency response
+// Use EXIFTAGTYPE_SPATIAL_FREQ_RESPONSE as exif_tag_type (EXIF_UNDEFINED)
+// Count would be any
+#define _ID_SPATIAL_FREQ_RESPONSE 0xa20c
+#define EXIFTAGID_SPATIAL_FREQ_RESPONSE \
+  CONSTRUCT_TAGID(SPATIAL_FREQ_RESPONSE, _ID_SPATIAL_FREQ_RESPONSE)
+#define EXIFTAGTYPE_SPATIAL_FREQ_RESPONSE EXIF_UNDEFINED
+// Focal plane x resolution
+// Use EXIFTAGTYPE_FOCAL_PLANE_X_RESOLUTION as exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_FOCAL_PLANE_X_RESOLUTION 0xa20e
+#define EXIFTAGID_FOCAL_PLANE_X_RESOLUTION \
+  CONSTRUCT_TAGID(FOCAL_PLANE_X_RESOLUTION, _ID_FOCAL_PLANE_X_RESOLUTION)
+#define EXIFTAGTYPE_FOCAL_PLANE_X_RESOLUTION EXIF_RATIONAL
+// Focal plane y resolution
+// Use EXIFTAGTYPE_FOCAL_PLANE_Y_RESOLUTION as exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_FOCAL_PLANE_Y_RESOLUTION 0xa20f
+#define EXIFTAGID_FOCAL_PLANE_Y_RESOLUTION \
+  CONSTRUCT_TAGID(FOCAL_PLANE_Y_RESOLUTION, _ID_FOCAL_PLANE_Y_RESOLUTION)
+#define EXIFTAGTYPE_FOCAL_PLANE_Y_RESOLUTION EXIF_RATIONAL
+// Focal plane  resolution unit
+// Use EXIFTAGTYPE_FOCAL_PLANE_RESOLUTION_UNIT as exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_FOCAL_PLANE_RESOLUTION_UNIT 0xa210
+#define EXIFTAGID_FOCAL_PLANE_RESOLUTION_UNIT \
+  CONSTRUCT_TAGID(FOCAL_PLANE_RESOLUTION_UNIT, _ID_FOCAL_PLANE_RESOLUTION_UNIT)
+#define EXIFTAGTYPE_FOCAL_PLANE_RESOLUTION_UNIT EXIF_SHORT
+// Subject location
+// Use EXIFTAGTYPE_SUBJECT_LOCATION as the exif_tag_type (EXIF_SHORT)
+// Count should be 2
+#define _ID_SUBJECT_LOCATION 0xa214
+#define EXIFTAGID_SUBJECT_LOCATION \
+  CONSTRUCT_TAGID(SUBJECT_LOCATION, _ID_SUBJECT_LOCATION)
+#define EXIFTAGTYPE_SUBJECT_LOCATION EXIF_SHORT
+// Exposure index
+// Use EXIFTAGTYPE_EXPOSURE_INDEX as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_EXPOSURE_INDEX 0xa215
+#define EXIFTAGID_EXPOSURE_INDEX \
+  CONSTRUCT_TAGID(EXPOSURE_INDEX, _ID_EXPOSURE_INDEX)
+#define EXIFTAGTYPE_EXPOSURE_INDEX EXIF_RATIONAL
+// Sensing method
+// Use EXIFTAGTYPE_SENSING_METHOD as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_SENSING_METHOD 0xa217
+#define EXIFTAGID_SENSING_METHOD \
+  CONSTRUCT_TAGID(SENSING_METHOD, _ID_SENSING_METHOD)
+#define EXIFTAGTYPE_SENSING_METHOD EXIF_SHORT
+// File source
+// Use EXIFTAGTYPE_FILE_SOURCE as the exif_tag_type (EXIF_UNDEFINED)
+// Count should be 1
+#define _ID_FILE_SOURCE 0xa300
+#define EXIFTAGID_FILE_SOURCE CONSTRUCT_TAGID(FILE_SOURCE, _ID_FILE_SOURCE)
+#define EXIFTAGTYPE_FILE_SOURCE EXIF_UNDEFINED
+// Scene type
+// Use EXIFTAGTYPE_SCENE_TYPE as the exif_tag_type (EXIF_UNDEFINED)
+// Count should be 1
+#define _ID_SCENE_TYPE 0xa301
+#define EXIFTAGID_SCENE_TYPE CONSTRUCT_TAGID(SCENE_TYPE, _ID_SCENE_TYPE)
+#define EXIFTAGTYPE_SCENE_TYPE EXIF_UNDEFINED
+// CFA pattern
+// Use EXIFTAGTYPE_CFA_PATTERN as the exif_tag_type (EXIF_UNDEFINED)
+// Count can be any
+#define _ID_CFA_PATTERN 0xa302
+#define EXIFTAGID_CFA_PATTERN CONSTRUCT_TAGID(CFA_PATTERN, _ID_CFA_PATTERN)
+#define EXIFTAGTYPE_CFA_PATTERN EXIF_UNDEFINED
+// Custom image processing
+// Use EXIFTAGTYPE_CUSTOM_RENDERED as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_CUSTOM_RENDERED 0xa401
+#define EXIFTAGID_CUSTOM_RENDERED \
+  CONSTRUCT_TAGID(CUSTOM_RENDERED, _ID_CUSTOM_RENDERED)
+#define EXIFTAGTYPE_CUSTOM_RENDERED EXIF_SHORT
+// Exposure mode
+// Use EXIFTAGTYPE_EXPOSURE_MODE as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_EXPOSURE_MODE 0xa402
+#define EXIFTAGID_EXPOSURE_MODE \
+  CONSTRUCT_TAGID(EXPOSURE_MODE, _ID_EXPOSURE_MODE)
+#define EXIFTAGTYPE_EXPOSURE_MODE EXIF_SHORT
+// White balance
+// Use EXIFTAGTYPE_WHITE_BALANCE as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_WHITE_BALANCE 0xa403
+#define EXIFTAGID_WHITE_BALANCE \
+  CONSTRUCT_TAGID(WHITE_BALANCE, _ID_WHITE_BALANCE)
+#define EXIFTAGTYPE_WHITE_BALANCE EXIF_SHORT
+// Digital zoom ratio
+// Use EXIFTAGTYPE_DIGITAL_ZOOM_RATIO as the exif_tag_type (EXIF_RATIONAL)
+// Count should be 1
+#define _ID_DIGITAL_ZOOM_RATIO 0xa404
+#define EXIFTAGID_DIGITAL_ZOOM_RATIO \
+  CONSTRUCT_TAGID(DIGITAL_ZOOM_RATIO, _ID_DIGITAL_ZOOM_RATIO)
+#define EXIFTAGTYPE_DIGITAL_ZOOM_RATIO EXIF_RATIONAL
+// Focal length in 35mm film
+// Use EXIFTAGTYPE_FOCAL_LENGTH_35MM as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_FOCAL_LENGTH_35MM 0xa405
+#define EXIFTAGID_FOCAL_LENGTH_35MM CONSTRUCT_TAGID(FOCAL_LENGTH_35MM, _ID_FOCAL_LENGTH_35MM)
+#define EXIFTAGTYPE_FOCAL_LENGTH_35MM EXIF_SHORT
+// Scene capture type
+// Use EXIFTAGTYPE_SCENE_CAPTURE_TYPE as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_SCENE_CAPTURE_TYPE 0xa406
+#define EXIFTAGID_SCENE_CAPTURE_TYPE \
+  CONSTRUCT_TAGID(SCENE_CAPTURE_TYPE, _ID_SCENE_CAPTURE_TYPE)
+#define EXIFTAGTYPE_SCENE_CAPTURE_TYPE EXIF_SHORT
+// Gain control
+// Use EXIFTAGTYPE_GAIN_CONTROL as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_GAIN_CONTROL 0xa407
+#define EXIFTAGID_GAIN_CONTROL CONSTRUCT_TAGID(GAIN_CONTROL, _ID_GAIN_CONTROL)
+#define EXIFTAGTYPE_GAIN_CONTROL EXIF_SHORT
+// Contrast
+// Use EXIFTAGTYPE_CONTRAST as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_CONTRAST 0xa408
+#define EXIFTAGID_CONTRAST CONSTRUCT_TAGID(CONTRAST, _ID_CONTRAST)
+#define EXIFTAGTYPE_CONTRAST EXIF_SHORT
+// Saturation
+// Use EXIFTAGTYPE_SATURATION as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_SATURATION  0xa409
+#define EXIFTAGID_SATURATION CONSTRUCT_TAGID(SATURATION, _ID_SATURATION)
+#define EXIFTAGTYPE_SATURATION EXIF_SHORT
+// Sharpness
+// Use EXIFTAGTYPE_SHARPNESS as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_SHARPNESS 0xa40a
+#define EXIFTAGID_SHARPNESS CONSTRUCT_TAGID(SHARPNESS, _ID_SHARPNESS)
+#define EXIFTAGTYPE_SHARPNESS EXIF_SHORT
+// Device settings description
+// Use EXIFTAGTYPE_DEVICE_SETTINGS_DESCRIPTION as the exif_tag_type (EXIF_UNDEFINED)
+// Count could be any
+#define _ID_DEVICE_SETTINGS_DESCRIPTION 0xa40b
+#define EXIFTAGID_DEVICE_SETTINGS_DESCRIPTION \
+  CONSTRUCT_TAGID(DEVICE_SETTINGS_DESCRIPTION, _ID_DEVICE_SETTINGS_DESCRIPTION)
+#define EXIFTAGTYPE_DEVICE_SETTINGS_DESCRIPTION EXIF_UNDEFINED
+// Subject distance range
+// Use EXIFTAGTYPE_SUBJECT_DISTANCE_RANGE as the exif_tag_type (EXIF_SHORT)
+// Count should be 1
+#define _ID_SUBJECT_DISTANCE_RANGE 0xa40c
+#define EXIFTAGID_SUBJECT_DISTANCE_RANGE \
+  CONSTRUCT_TAGID(SUBJECT_DISTANCE_RANGE, _ID_SUBJECT_DISTANCE_RANGE)
+#define EXIFTAGTYPE_SUBJECT_DISTANCE_RANGE EXIF_SHORT
+// Unique image id
+// Use EXIFTAGTYPE_IMAGE_UID as the exif_tag_type (EXIF_ASCII)
+// Count should be 33
+#define _ID_IMAGE_UID 0xa420
+#define EXIFTAGID_IMAGE_UID CONSTRUCT_TAGID(IMAGE_UID, _ID_IMAGE_UID)
+#define EXIFTAGTYPE_IMAGE_UID EXIF_ASCII
+// PIM tag
+// Use EXIFTAGTYPE_PIM_TAG as the exif_tag_type (EXIF_UNDEFINED)
+// Count can be any
+#define _ID_PIM 0xc4a5
+#define EXIFTAGID_PIM_TAG CONSTRUCT_TAGID(PIM, _ID_PIM)
+#define EXIFTAGTYPE_PIM_TAG EXIF_UNDEFINED
+#endif // __QEXIF_H__
+
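For reference, the EXIFTAGID_* and EXIFTAGTYPE_* pairs defined in qexif.h above are meant to be used together when a client fills in one tag for the JPEG encoder: the ID goes into the tag identifier field and the TYPE constant tells the writer how to interpret the data union. A minimal sketch of that pattern, assuming the rat_t and exif_tag_entry_t layouts declared earlier in qexif.h and the QEXIF_INFO_DATA wrapper introduced in QOMX_JpegExtensions.h further below; the member names in the sketch are illustrative and not guaranteed by this patch:

    /* Sketch only: populate a single focal-length tag (4.2 mm).
     * The exif_tag_entry_t members (type, count, copy, data._rat) and the
     * rat_t num/denom order are assumptions based on typical qexif headers;
     * adjust to the definitions earlier in qexif.h if they differ. */
    static void fill_focal_length(QEXIF_INFO_DATA *entry)
    {
      rat_t focal_length = { 420, 100 };                 /* 4.2 mm as num/denom */

      entry->tag_id = EXIFTAGID_FOCAL_LENGTH;            /* built via CONSTRUCT_TAGID */
      entry->tag_entry.type = EXIFTAGTYPE_FOCAL_LENGTH;  /* EXIF_RATIONAL */
      entry->tag_entry.count = 1;                        /* "Count should be 1" */
      entry->tag_entry.copy = 1;                         /* ask the writer to copy the value */
      entry->tag_entry.data._rat = focal_length;
    }
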
diff --git a/camera/mm-image-codec/qomx_core/Android.mk b/camera/mm-image-codec/qomx_core/Android.mk
new file mode 100644
index 0000000..4d4e39c
--- /dev/null
+++ b/camera/mm-image-codec/qomx_core/Android.mk
@@ -0,0 +1,28 @@
+OMX_CORE_PATH := $(call my-dir)
+
+# ------------------------------------------------------------------------------
+#                Make the shared library (libqomx_core)
+# ------------------------------------------------------------------------------
+
+include $(CLEAR_VARS)
+LOCAL_PATH := $(OMX_CORE_PATH)
+LOCAL_MODULE_TAGS := optional
+
+omx_core_defines:= -Werror \
+                   -g -O0
+
+LOCAL_CFLAGS := $(omx_core_defines)
+
+OMX_HEADER_DIR := frameworks/native/include/media/openmax
+
+LOCAL_C_INCLUDES := $(OMX_HEADER_DIR)
+LOCAL_C_INCLUDES += $(LOCAL_PATH)/../qexif
+
+LOCAL_SRC_FILES := qomx_core.c
+
+LOCAL_MODULE           := libqomx_core
+LOCAL_PRELINK_MODULE   := false
+LOCAL_SHARED_LIBRARIES := libcutils libdl
+
+LOCAL_32_BIT_ONLY := true
+include $(BUILD_SHARED_LIBRARY)
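Since libqomx_core is built here as a standalone shared library (and itself links libdl for loading OMX image components), a client can either list it in its own LOCAL_SHARED_LIBRARIES or resolve it at runtime. A rough sketch of the runtime-loading variant, purely illustrative and not taken from this patch; OMX_Init and OMX_GetHandle are the standard OMX IL core entry points such a core is expected to export:

    /* Illustrative only: open libqomx_core.so and bring the OMX core up.
     * Error handling is minimal and the exported symbol set is an assumption. */
    #include <dlfcn.h>
    #include <OMX_Core.h>

    typedef OMX_ERRORTYPE (*omx_init_fn)(void);

    static void *load_qomx_core(void)
    {
      void *lib = dlopen("libqomx_core.so", RTLD_NOW);
      if (lib == NULL)
        return NULL;

      omx_init_fn omx_init = (omx_init_fn)dlsym(lib, "OMX_Init");
      if (omx_init == NULL || omx_init() != OMX_ErrorNone) {
        dlclose(lib);
        return NULL;
      }
      return lib;  /* keep the handle for later dlsym()/OMX_Deinit()/dlclose() */
    }
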
diff --git a/camera/mm-image-codec/qomx_core/QOMX_JpegExtensions.h b/camera/mm-image-codec/qomx_core/QOMX_JpegExtensions.h
new file mode 100644
index 0000000..a82a910
--- /dev/null
+++ b/camera/mm-image-codec/qomx_core/QOMX_JpegExtensions.h
@@ -0,0 +1,328 @@
+/*Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+    * Neither the name of The Linux Foundation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.*/
+
+#ifndef __QOMX_EXTENSIONS_H__
+#define __QOMX_EXTENSIONS_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <OMX_Image.h>
+#include <qexif.h>
+
+/** QOMX_IMAGE_EXT_EVENTS
+*  Qcom-specific events extended from OMX_EVENTTYPE
+*  @OMX_EVENT_THUMBNAIL_DROPPED - Indicates that the thumbnail
+*                                 size is too big to be included
+*                                 in the exif and will be
+*                                 dropped
+**/
+typedef enum {
+ OMX_EVENT_THUMBNAIL_DROPPED = OMX_EventVendorStartUnused+1
+} QOMX_IMAGE_EXT_EVENTS;
+
+/**
+*  The following macros define the strings to be used for
+*  getting the extension indices.
+**/
+#define QOMX_IMAGE_EXT_EXIF_NAME                  "OMX.QCOM.image.exttype.exif"
+#define QOMX_IMAGE_EXT_THUMBNAIL_NAME        "OMX.QCOM.image.exttype.thumbnail"
+#define QOMX_IMAGE_EXT_BUFFER_OFFSET_NAME "OMX.QCOM.image.exttype.bufferOffset"
+#define QOMX_IMAGE_EXT_MOBICAT_NAME            "OMX.QCOM.image.exttype.mobicat"
+#define QOMX_IMAGE_EXT_ENCODING_MODE_NAME        "OMX.QCOM.image.encoding.mode"
+#define QOMX_IMAGE_EXT_WORK_BUFFER_NAME      "OMX.QCOM.image.exttype.workbuffer"
+#define QOMX_IMAGE_EXT_METADATA_NAME      "OMX.QCOM.image.exttype.metadata"
+#define QOMX_IMAGE_EXT_META_ENC_KEY_NAME      "OMX.QCOM.image.exttype.metaEncKey"
+#define QOMX_IMAGE_EXT_MEM_OPS_NAME      "OMX.QCOM.image.exttype.mem_ops"
+#define QOMX_IMAGE_EXT_JPEG_SPEED_NAME      "OMX.QCOM.image.exttype.jpeg.speed"
+
+/** QOMX_IMAGE_EXT_INDEXTYPE
+*  This enum is an extension of the OMX_INDEXTYPE enum and
+*  specifies Qcom-supported extension indexes. These indexes are
+*  associated with the extension names and can be used as
+*  indexes in the SetParameter and GetParameter functions to set
+*  or get values from Qcom-specific data structures.
+**/
+typedef enum {
+  //Name: OMX.QCOM.image.exttype.exif
+  QOMX_IMAGE_EXT_EXIF = 0x07F00000,
+
+  //Name: OMX.QCOM.image.exttype.thumbnail
+  QOMX_IMAGE_EXT_THUMBNAIL = 0x07F00001,
+
+  //Name: OMX.QCOM.image.exttype.bufferOffset
+  QOMX_IMAGE_EXT_BUFFER_OFFSET = 0x07F00002,
+
+  //Name: OMX.QCOM.image.exttype.mobicat
+  QOMX_IMAGE_EXT_MOBICAT = 0x07F00003,
+
+  //Name: OMX.QCOM.image.encoding.mode
+  QOMX_IMAGE_EXT_ENCODING_MODE = 0x07F00004,
+
+  //Name: OMX.QCOM.image.exttype.workbuffer
+  QOMX_IMAGE_EXT_WORK_BUFFER = 0x07F00005,
+
+  //Name: OMX.QCOM.image.exttype.metadata
+  QOMX_IMAGE_EXT_METADATA = 0x07F00008,
+
+  //Name: OMX.QCOM.image.exttype.metaEncKey
+  QOMX_IMAGE_EXT_META_ENC_KEY = 0x07F00009,
+
+  //Name: OMX.QCOM.image.exttype.mem_ops
+  QOMX_IMAGE_EXT_MEM_OPS = 0x07F0000A,
+
+  //Name: OMX.QCOM.image.exttype.jpeg.speed
+  QOMX_IMAGE_EXT_JPEG_SPEED = 0x07F000B,
+
+} QOMX_IMAGE_EXT_INDEXTYPE;
+
+/** QOMX_BUFFER_INFO
+*  The structure specifies information
+*  associated with the buffers and should be passed as appData
+*  in UseBuffer calls to the OMX component with buffer-specific data.
+*  @fd - FD of the allocated buffer. It can be zero if the
+*        buffer is allocated on the heap.
+*  @offset - Buffer offset
+**/
+
+typedef struct {
+  OMX_U32 fd;
+  OMX_U32 offset;
+} QOMX_BUFFER_INFO;
+
+/** QEXIF_INFO_DATA
+*   The basic exif structure used to construct
+*   information for a single exif tag.
+*   @tag_entry
+*   @tag_id
+**/
+typedef struct{
+  exif_tag_entry_t tag_entry;
+  exif_tag_id_t tag_id;
+} QEXIF_INFO_DATA;
+
+/** QEXTN_DATA
+*   The structure used to carry additional payload
+*   meant to be in EXIF APPx marker fields.
+*   @sw_3a_version
+**/
+typedef struct {
+  uint16_t sw_3a_version[4];
+} QEXTN_DATA;
+
+/**QOMX_EXIF_INFO
+*  The structure contains an array of exif tag
+*  structures (QEXIF_INFO_DATA) and should be passed to the OMX
+*  layer by the OMX client using the extension index.
+*  @exif_data - Array of exif tags
+*  @numOfEntries - Number of exif tag entries being passed in
+*                  the array
+*  @debug_data - specific debug information for internal use
+**/
+typedef struct {
+  QEXIF_INFO_DATA *exif_data;
+  OMX_U32 numOfEntries;
+  QEXTN_DATA debug_data;
+} QOMX_EXIF_INFO;
+
+/**QOMX_YUV_FRAME_INFO
+*  The structure contains all the offsets
+*  associated with the Y and cbcr buffers.
+*  @yOffset - Offset within the Y buffer
+*  @cbcrOffset - Offset within the cb/cr buffer. The array
+*                should be populated in order depending on cb
+*                first or cr first in case of planar data. For
+*                pseudo-planar data, only the first array element
+*                needs to be filled and the second element should
+*                be set to zero.
+*  @cbcrStartOffset - Start offset of the cb/cr buffer,
+*                     starting from the Y buffer. The array
+*                     should be populated in order depending on
+*                     cb first or cr first in case of planar
+*                     data. For pseudo-planar data, only the first
+*                     array element needs to be filled and the
+*                     second element should be set to zero.
+**/
+typedef struct {
+  OMX_U32 yOffset;
+  OMX_U32 cbcrOffset[2];
+  OMX_U32 cbcrStartOffset[2];
+} QOMX_YUV_FRAME_INFO;
+
+/** qomx_thumbnail_info
+*  Includes all information associated with the thumbnail
+*  @input_width - Width of the input thumbnail buffer
+*  @input_height - Height of the input thumbnail buffer
+*  @scaling_enabled - Flag indicating if thumbnail scaling is
+*                     enabled.
+*  @quality - JPEG Q factor value in the range of 1-100. A factor of 1
+*             produces the smallest, worst quality images, and a factor
+*             of 100 produces the largest, best quality images. A
+*             typical default is 75 for small, good quality images.
+*  @crop_info - Includes the crop width, crop height,
+*               horizontal and vertical offsets.
+*  @output_width - Output width of the thumbnail. This is
+*                  the width after scaling if scaling is enabled,
+*                  the width after cropping if only cropping is
+*                  enabled, or the same as the input width otherwise
+*  @output_height - Output height of the thumbnail. This is
+*                   the height after scaling if scaling is enabled,
+*                   the height after cropping if only cropping is
+*                   enabled, or the same as the input height otherwise
+**/
+typedef struct {
+  OMX_U32 input_width;
+  OMX_U32 input_height;
+  OMX_U8 scaling_enabled;
+  OMX_U32 quality;
+  OMX_CONFIG_RECTTYPE crop_info;
+  OMX_U32 output_width;
+  OMX_U32 output_height;
+  QOMX_YUV_FRAME_INFO tmbOffset;
+  OMX_U32 rotation;
+} QOMX_THUMBNAIL_INFO;
+
+/**qomx_mobicat
+*  Mobicat data to be passed to the OMX layer
+*  @mobicatData - Mobicat data
+*  @mobicatDataLength - Length of the mobicat data
+**/
+typedef struct {
+  OMX_U8 *mobicatData;
+  OMX_U32 mobicatDataLength;
+} QOMX_MOBICAT;
+
+/**qomx_workbuffer
+*  Ion buffer to be used for the H/W encoder
+*  @fd - FD of the buffer allocated
+*  @vaddr - Buffer address
+**/
+typedef struct {
+  int fd;
+  uint8_t *vaddr;
+  uint32_t length;
+} QOMX_WORK_BUFFER;
+
+/**QOMX_METADATA
+ *
+ * meta data to be set in EXIF
+ */
+typedef struct {
+  OMX_U8  *metadata;
+  OMX_U32 metaPayloadSize;
+  OMX_U8 mobicat_mask;
+} QOMX_METADATA;
+
+/**QOMX_META_ENC_KEY
+ *
+ * meta data encryption key
+ */
+typedef struct {
+  OMX_U8  *metaKey;
+  OMX_U32 keyLen;
+} QOMX_META_ENC_KEY;
+
+/** QOMX_IMG_COLOR_FORMATTYPE
+*  This enum is an extension of the OMX_COLOR_FORMATTYPE enum.
+*  It specifies Qcom supported color formats.
+**/
+typedef enum QOMX_IMG_COLOR_FORMATTYPE {
+  OMX_QCOM_IMG_COLOR_FormatYVU420SemiPlanar = OMX_COLOR_FormatVendorStartUnused + 0x300,
+  OMX_QCOM_IMG_COLOR_FormatYVU422SemiPlanar,
+  OMX_QCOM_IMG_COLOR_FormatYVU422SemiPlanar_h1v2,
+  OMX_QCOM_IMG_COLOR_FormatYUV422SemiPlanar_h1v2,
+  OMX_QCOM_IMG_COLOR_FormatYVU444SemiPlanar,
+  OMX_QCOM_IMG_COLOR_FormatYUV444SemiPlanar,
+  OMX_QCOM_IMG_COLOR_FormatYVU420Planar,
+  OMX_QCOM_IMG_COLOR_FormatYVU422Planar,
+  OMX_QCOM_IMG_COLOR_FormatYVU422Planar_h1v2,
+  OMX_QCOM_IMG_COLOR_FormatYUV422Planar_h1v2,
+  OMX_QCOM_IMG_COLOR_FormatYVU444Planar,
+  OMX_QCOM_IMG_COLOR_FormatYUV444Planar
+} QOMX_IMG_COLOR_FORMATTYPE;
+
+/** QOMX_ENCODING_MODE
+*  This enum is used to select parallel encoding
+*  or sequential encoding for the thumbnail and
+*  main image
+**/
+typedef enum {
+  OMX_Serial_Encoding,
+  OMX_Parallel_Encoding
+} QOMX_ENCODING_MODE;
+
+
+/**omx_jpeg_ouput_buf_t
+*  Structure describing jpeg output buffer
+*  @handle - Handle to the containing class
+*  @mem_hdl - Handle to camera memory struct
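+*  @isheap - Flag indicating whether the buffer is heap allocated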
+*  @vaddr - Buffer address
+*  @size - Buffer size
+*  @fd - file descriptor
+**/
+typedef struct {
+  void *handle;
+  void *mem_hdl;
+  int8_t isheap;
+  size_t size; /*input*/
+  void *vaddr;
+  int fd;
+} omx_jpeg_ouput_buf_t;
+
+/** QOMX_MEM_OPS
+* Structure holding the function pointers to
+* buffer memory operations
+* @get_memory - function to allocate buffer memory
+**/
+typedef struct {
+  int (*get_memory)( omx_jpeg_ouput_buf_t *p_out_buf);
+} QOMX_MEM_OPS;
+
+/** QOMX_JPEG_SPEED_MODE
+* Enum specifying the values for the jpeg
+* speed mode setting
+**/
+typedef enum {
+  QOMX_JPEG_SPEED_MODE_NORMAL,
+  QOMX_JPEG_SPEED_MODE_HIGH
+} QOMX_JPEG_SPEED_MODE;
+
+/** QOMX_JPEG_SPEED
+* Structure used to set the jpeg speed mode
+* parameter
+* @speedMode - jpeg speed mode
+**/
+typedef struct {
+  QOMX_JPEG_SPEED_MODE speedMode;
+} QOMX_JPEG_SPEED;
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif
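
The offset fields in QOMX_YUV_FRAME_INFO above are easiest to read against a concrete layout. The sketch below assumes a tightly packed NV12 buffer (pseudo-planar, cb first: a w*h luma plane immediately followed by one interleaved CbCr plane); the helper name and the included header name are illustrative only, not part of the patch.

    #include <string.h>
    #include "QOMX_JpegExtensions.h"  /* extensions header from the hunk above; name assumed */

    /* Illustrative only: describe a tightly packed NV12 buffer. */
    static void fill_nv12_frame_info(QOMX_YUV_FRAME_INFO *info,
        OMX_U32 w, OMX_U32 h)
    {
      memset(info, 0, sizeof(*info));
      info->yOffset = 0;                 /* no padding before the luma plane           */
      info->cbcrOffset[0] = 0;           /* assumed: extra offset within the CbCr      */
                                         /* plane itself; zero when tightly packed     */
      info->cbcrOffset[1] = 0;           /* second element unused for pseudo-planar    */
      info->cbcrStartOffset[0] = w * h;  /* CbCr plane starts right after the Y plane  */
      info->cbcrStartOffset[1] = 0;      /* second element unused for pseudo-planar    */
    }
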
diff --git a/camera/mm-image-codec/qomx_core/qomx_core.c b/camera/mm-image-codec/qomx_core/qomx_core.c
new file mode 100644
index 0000000..6afb2a6
--- /dev/null
+++ b/camera/mm-image-codec/qomx_core/qomx_core.c
@@ -0,0 +1,369 @@
+/*Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+    * Neither the name of The Linux Foundation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.*/
+
+#define LOG_NDEBUG 0
+#define LOG_NIDEBUG 0
+#define LOG_TAG "qomx_image_core"
+#include <utils/Log.h>
+
+#include "qomx_core.h"
+
+#define BUFF_SIZE 255
+
+static omx_core_t *g_omxcore;
+static pthread_mutex_t g_omxcore_lock = PTHREAD_MUTEX_INITIALIZER;
+static int g_omxcore_cnt = 0;
+
+//Map each component name to its library (.so) name
+static const comp_info_t g_comp_info[] =
+{
+  { "OMX.qcom.image.jpeg.encoder", "libqomx_jpegenc.so" },
+  { "OMX.qcom.image.jpeg.decoder", "libqomx_jpegdec.so" },
+  { "OMX.qcom.image.jpeg.encoder_pipeline", "libqomx_jpegenc_pipe.so" }
+};
+
+static int get_idx_from_handle(OMX_IN OMX_HANDLETYPE *ahComp, int *acompIndex,
+  int *ainstanceIndex);
+
+/*==============================================================================
+* Function : OMX_Init
+* Parameters: None
+* Description: This is the first call that is made to the OMX Core
+* and initializes the OMX IL core
+==============================================================================*/
+OMX_API OMX_ERRORTYPE OMX_APIENTRY OMX_Init()
+{
+  OMX_ERRORTYPE rc = OMX_ErrorNone;
+  int i = 0;
+  int comp_cnt = sizeof(g_comp_info)/sizeof(g_comp_info[0]);
+
+  pthread_mutex_lock(&g_omxcore_lock);
+
+  /* check if core is created */
+  if (g_omxcore) {
+    g_omxcore_cnt++;
+    pthread_mutex_unlock(&g_omxcore_lock);
+    return rc;
+  }
+
+  if (comp_cnt > OMX_COMP_MAX_NUM) {
+    ALOGE("%s:%d] cannot exceed max number of components",
+      __func__, __LINE__);
+    pthread_mutex_unlock(&g_omxcore_lock);
+    return OMX_ErrorUndefined;
+  }
+  /* create new global object */
+  g_omxcore = malloc(sizeof(omx_core_t));
+  if (g_omxcore) {
+    memset(g_omxcore, 0x0, sizeof(omx_core_t));
+
+    /* populate the library name and component name */
+    for (i = 0; i < comp_cnt; i++) {
+      g_omxcore->component[i].comp_name = g_comp_info[i].comp_name;
+      g_omxcore->component[i].lib_name = g_comp_info[i].lib_name;
+    }
+    g_omxcore->comp_cnt = comp_cnt;
+    g_omxcore_cnt++;
+  } else {
+    rc = OMX_ErrorInsufficientResources;
+  }
+  pthread_mutex_unlock(&g_omxcore_lock);
+  ALOGI("%s:%d] Complete %d", __func__, __LINE__, comp_cnt);
+  return rc;
+}
+
+/*==============================================================================
+* Function : OMX_Deinit
+* Parameters: None
+* Return Value : OMX_ERRORTYPE
+* Description: Deinit all the OMX components
+==============================================================================*/
+OMX_API OMX_ERRORTYPE OMX_APIENTRY OMX_Deinit()
+{
+  pthread_mutex_lock(&g_omxcore_lock);
+
+  if (g_omxcore_cnt == 1) {
+    if (g_omxcore) {
+      free(g_omxcore);
+      g_omxcore = NULL;
+    }
+  }
+  if (g_omxcore_cnt) {
+    g_omxcore_cnt--;
+  }
+
+  ALOGI("%s:%d] Complete", __func__, __LINE__);
+  pthread_mutex_unlock(&g_omxcore_lock);
+  return OMX_ErrorNone;
+}
+
+/*==============================================================================
+* Function : get_comp_from_list
+* Parameters: componentName
+* Return Value : component_index
+* Description: If the component is present in the list, return the
+* component index. If it is not present, return -1.
+==============================================================================*/
+static int get_comp_from_list(char *comp_name)
+{
+  int index = -1, i = 0;
+
+  if (NULL == comp_name)
+    return -1;
+
+  for (i = 0; i < g_omxcore->comp_cnt; i++) {
+    if (!strcmp(g_omxcore->component[i].comp_name, comp_name)) {
+      index = i;
+      break;
+    }
+  }
+  return index;
+}
+
+/*==============================================================================
+* Function : get_free_inst_idx
+* Parameters: p_comp
+* Return Value : The next instance index if available
+* Description: Get the next available index at which to store the new
+*            instance of the component being created.
+*============================================================================*/
+static int get_free_inst_idx(omx_core_component_t *p_comp)
+{
+  int idx = -1, i = 0;
+
+  for (i = 0; i < OMX_COMP_MAX_INSTANCES; i++) {
+    if (NULL == p_comp->handle[i]) {
+      idx = i;
+      break;
+    }
+  }
+  return idx;
+}
+
+/*==============================================================================
+* Function : OMX_GetHandle
+* Parameters: handle, componentName, appData, callbacks
+* Return Value : OMX_ERRORTYPE
+* Description: Load the requested OMX component library and construct the component
+==============================================================================*/
+OMX_API OMX_ERRORTYPE OMX_APIENTRY OMX_GetHandle(
+  OMX_OUT OMX_HANDLETYPE* handle,
+  OMX_IN OMX_STRING componentName,
+  OMX_IN OMX_PTR appData,
+  OMX_IN OMX_CALLBACKTYPE* callBacks)
+{
+  OMX_ERRORTYPE rc = OMX_ErrorNone;
+  int comp_idx = 0, inst_idx = 0;
+  char libName[BUFF_SIZE] = {0};
+  void *p_obj = NULL;
+  OMX_COMPONENTTYPE *p_comp = NULL;
+  omx_core_component_t *p_core_comp = NULL;
+  OMX_BOOL close_handle = OMX_FALSE;
+
+  if (NULL == handle) {
+    ALOGE("%s:%d] Error invalid input ", __func__, __LINE__);
+    return OMX_ErrorBadParameter;
+  }
+
+  pthread_mutex_lock(&g_omxcore_lock);
+
+  comp_idx = get_comp_from_list(componentName);
+  if (comp_idx < 0) {
+    ALOGE("%s:%d] Cannot find the component", __func__, __LINE__);
+    pthread_mutex_unlock(&g_omxcore_lock);
+    return OMX_ErrorInvalidComponent;
+  }
+  p_core_comp = &g_omxcore->component[comp_idx];
+
+  *handle = NULL;
+
+  //Get the next free instance index for the component
+  inst_idx = get_free_inst_idx(p_core_comp);
+  if (inst_idx < 0) {
+    ALOGE("%s:%d] Cannot alloc new instance", __func__, __LINE__);
+    rc = OMX_ErrorInvalidComponent;
+    goto error;
+  }
+
+  if (FALSE == p_core_comp->open) {
+    /* load the library */
+    p_core_comp->lib_handle = dlopen(p_core_comp->lib_name, RTLD_NOW);
+    if (NULL == p_core_comp->lib_handle) {
+      ALOGE("%s:%d] Cannot load the library", __func__, __LINE__);
+      rc = OMX_ErrorInvalidComponent;
+      goto error;
+    }
+
+    p_core_comp->open = TRUE;
+    /* Init the component and get component functions */
+    p_core_comp->create_comp_func = dlsym(p_core_comp->lib_handle,
+      "create_component_fns");
+    p_core_comp->get_instance = dlsym(p_core_comp->lib_handle, "getInstance");
+
+    close_handle = OMX_TRUE;
+    if (!p_core_comp->create_comp_func || !p_core_comp->get_instance) {
+      ALOGE("%s:%d] Cannot maps the symbols", __func__, __LINE__);
+      rc = OMX_ErrorInvalidComponent;
+      goto error;
+    }
+  }
+
+  /* Call the function from the address to create the obj */
+  p_obj = (*p_core_comp->get_instance)();
+  ALOGI("%s:%d] get instance pts is %p", __func__, __LINE__, p_obj);
+  if (NULL == p_obj) {
+    ALOGE("%s:%d] Error cannot create object", __func__, __LINE__);
+    rc = OMX_ErrorInvalidComponent;
+    goto error;
+  }
+
+  /* Call the function from the address to get the func ptrs */
+  p_comp = (*p_core_comp->create_comp_func)(p_obj);
+  if (NULL == p_comp) {
+    ALOGE("%s:%d] Error cannot create component", __func__, __LINE__);
+    rc = OMX_ErrorInvalidComponent;
+    goto error;
+  }
+
+  *handle = p_core_comp->handle[inst_idx] = (OMX_HANDLETYPE)p_comp;
+
+  ALOGD("%s:%d] handle = %p Instanceindex = %d,"
+    "comp_idx %d g_ptr %p", __func__, __LINE__,
+    p_core_comp->handle[inst_idx], inst_idx,
+    comp_idx, g_omxcore);
+
+  p_comp->SetCallbacks(p_comp, callBacks, appData);
+  pthread_mutex_unlock(&g_omxcore_lock);
+  ALOGI("%s:%d] Success", __func__, __LINE__);
+  return OMX_ErrorNone;
+
+error:
+
+  if (OMX_TRUE == close_handle) {
+    dlclose(p_core_comp->lib_handle);
+    p_core_comp->lib_handle = NULL;
+  }
+  pthread_mutex_unlock(&g_omxcore_lock);
+  ALOGE("%s:%d] Error %d", __func__, __LINE__, rc);
+  return rc;
+}
+
+/*==============================================================================
+* Function : get_idx_from_handle
+* Parameters: handle
+* Return Value : Component present - true or false, Instance Index, Component
+* Index
+* Description: Check if the handle is present in the list and get the component
+* index and instance index for the component handle.
+==============================================================================*/
+static int get_idx_from_handle(OMX_IN OMX_HANDLETYPE *ahComp, int *aCompIdx,
+  int *aInstIdx)
+{
+  int i = 0, j = 0;
+  for (i = 0; i < g_omxcore->comp_cnt; i++) {
+    for (j = 0; j < OMX_COMP_MAX_INSTANCES; j++) {
+      if ((OMX_COMPONENTTYPE *)g_omxcore->component[i].handle[j] ==
+        (OMX_COMPONENTTYPE *)ahComp) {
+        ALOGD("%s:%d] comp_idx %d inst_idx %d", __func__, __LINE__, i, j);
+        *aCompIdx = i;
+        *aInstIdx = j;
+        return TRUE;
+      }
+    }
+  }
+  return FALSE;
+}
+
+/*==============================================================================
+* Function : is_comp_active
+* Parameters: p_core_comp
+* Return Value : int
+* Description: Check if the component has any active instances
+==============================================================================*/
+static uint8_t is_comp_active(omx_core_component_t *p_core_comp)
+{
+  uint8_t i = 0;
+  for (i = 0; i < OMX_COMP_MAX_INSTANCES; i++) {
+    if (NULL != p_core_comp->handle[i]) {
+      return TRUE;
+    }
+  }
+  return FALSE;
+}
+
+/*==============================================================================
+* Function : OMX_FreeHandle
+* Parameters: hComp
+* Return Value : OMX_ERRORTYPE
+* Description: Deinit the omx component and remove it from the global list
+==============================================================================*/
+OMX_API OMX_ERRORTYPE OMX_APIENTRY OMX_FreeHandle(
+  OMX_IN OMX_HANDLETYPE hComp)
+{
+  OMX_ERRORTYPE rc = OMX_ErrorNone;
+  int comp_idx, inst_idx;
+  OMX_COMPONENTTYPE *p_comp = NULL;
+  omx_core_component_t *p_core_comp = NULL;
+
+  ALOGV("%s:%d] ", __func__, __LINE__);
+  if (hComp == NULL) {
+    return OMX_ErrorBadParameter;
+  }
+
+  pthread_mutex_lock(&g_omxcore_lock);
+
+  p_comp = (OMX_COMPONENTTYPE *)hComp;
+  if (FALSE == get_idx_from_handle(hComp, &comp_idx, &inst_idx)) {
+    ALOGE("%s:%d] Error invalid component", __func__, __LINE__);
+    pthread_mutex_unlock(&g_omxcore_lock);
+    return OMX_ErrorInvalidComponent;
+  }
+
+
+  //Deinit the component;
+  rc = p_comp->ComponentDeInit(hComp);
+  if (rc != OMX_ErrorNone) {
+    /* Remove the handle from the comp structure */
+    ALOGE("%s:%d] Error comp deinit failed", __func__, __LINE__);
+    pthread_mutex_unlock(&g_omxcore_lock);
+    return OMX_ErrorInvalidComponent;
+  }
+  p_core_comp = &g_omxcore->component[comp_idx];
+  p_core_comp->handle[inst_idx] = NULL;
+  if (!is_comp_active(p_core_comp)) {
+    rc = dlclose(p_core_comp->lib_handle);
+    p_core_comp->lib_handle = NULL;
+    p_core_comp->get_instance = NULL;
+    p_core_comp->create_comp_func = NULL;
+    p_core_comp->open = FALSE;
+  } else {
+    ALOGI("%s:%d] Error Component is still Active", __func__, __LINE__);
+  }
+  pthread_mutex_unlock(&g_omxcore_lock);
+  ALOGV("%s:%d] Success", __func__, __LINE__);
+  return rc;
+}
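
OMX_Init, OMX_GetHandle, OMX_FreeHandle and OMX_Deinit above are the entire surface this core exposes to a client. A minimal sketch of the expected call sequence follows; the callback table is left zeroed and error handling is omitted, so this is for orientation only, not a working encoder session.

    #include <stddef.h>
    #include "OMX_Core.h"
    #include "OMX_Component.h"

    /* Real clients install EventHandler/EmptyBufferDone/FillBufferDone
     * callbacks here; left zeroed purely for brevity. */
    static OMX_CALLBACKTYPE callbacks;

    int jpeg_encoder_session(void)
    {
      OMX_HANDLETYPE handle = NULL;

      OMX_Init();                               /* creates g_omxcore on first call       */
      OMX_GetHandle(&handle,
          (OMX_STRING)"OMX.qcom.image.jpeg.encoder",
          NULL /* appData */, &callbacks);      /* dlopens libqomx_jpegenc.so            */

      /* ... usual OMX state transitions and buffer exchange go here ... */

      OMX_FreeHandle(handle);                   /* ComponentDeInit; dlclose once no      */
                                                /* instance of the component remains     */
      OMX_Deinit();                             /* frees g_omxcore when refcount drops   */
      return 0;
    }
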
diff --git a/camera/mm-image-codec/qomx_core/qomx_core.h b/camera/mm-image-codec/qomx_core/qomx_core.h
new file mode 100644
index 0000000..c5e792b
--- /dev/null
+++ b/camera/mm-image-codec/qomx_core/qomx_core.h
@@ -0,0 +1,97 @@
+/*Copyright (c) 2012, The Linux Foundation. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+      copyright notice, this list of conditions and the following
+      disclaimer in the documentation and/or other materials provided
+      with the distribution.
+    * Neither the name of The Linux Foundation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
+ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.*/
+
+#ifndef QOMX_CORE_H
+#define QOMX_CORE_H
+
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <malloc.h>
+#include <pthread.h>
+#include <dlfcn.h>
+#include <stdlib.h>
+#include "OMX_Component.h"
+
+#define TRUE 1
+#define FALSE 0
+#define OMX_COMP_MAX_INSTANCES 3
+#define OMX_CORE_MAX_ROLES 1
+#define OMX_COMP_MAX_NUM 3
+#define OMX_SPEC_VERSION 0x00000101
+
+typedef void *(*get_instance_t)(void);
+typedef void *(*create_comp_func_t)(OMX_PTR aobj);
+
+/** comp_info_t: Structure containing the mapping
+*    between the component name and the corresponding .so name
+*    @comp_name: Name of the component
+*    @lib_name: Name of the .so library
+**/
+typedef struct comp_info_t {
+  char *comp_name;
+  char *lib_name;
+} comp_info_t;
+
+/** omx_core_component_t: OMX Component structure
+*    @handle: array of instance handles of the component
+*    @roles: array of roles played by the component
+*    @name: name of the component
+*    @open: Flag indicating whether the component library is
+*              currently loaded
+*    @lib_handle: Library handle after dlopen
+*    @get_instance: Function ptr to get an instance of the
+*     component
+*    @create_comp_func: Function ptr to map the functions in the
+*     OMX handle to their respective implementations in
+*     the component
+*    @comp_name: name of the component
+*    @lib_name: Name of the .so library
+**/
+typedef struct _omx_core_component_t {
+  OMX_HANDLETYPE *handle[OMX_COMP_MAX_INSTANCES];  //Instance handle
+  char *roles[OMX_CORE_MAX_ROLES];  //Roles played by the component
+  char *name;  //Component Name
+  uint8_t open;  //Is component active
+  void *lib_handle;
+  get_instance_t get_instance;
+  create_comp_func_t create_comp_func;
+  char *comp_name;
+  char *lib_name;
+} omx_core_component_t;
+
+/** omx_core_t: Global structure that contains all the active
+*   components
+*    @component: array of active components
+*    @comp_cnt: number of components registered with the core
+*    @core_lock: Lock to synchronize the omx core operations
+**/
+typedef struct _omx_core_t {
+  omx_core_component_t component[OMX_COMP_MAX_NUM];  //Array of components
+  int comp_cnt;
+  pthread_mutex_t core_lock;
+} omx_core_t;
+
+#endif
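
The core in qomx_core.c resolves exactly two symbols from each library named in g_comp_info: "getInstance" (matching get_instance_t) and "create_component_fns" (matching create_comp_func_t). A hedged sketch of what a component .so might export to satisfy that contract; the jpeg_engine type and its two helpers are hypothetical placeholders, not APIs from this patch.

    #include "OMX_Component.h"

    /* Hypothetical internal engine object of the component library. */
    typedef struct jpeg_engine jpeg_engine_t;
    extern jpeg_engine_t *jpeg_engine_new(void);                              /* hypothetical */
    extern void jpeg_engine_fill_fns(jpeg_engine_t *p, OMX_COMPONENTTYPE *c); /* hypothetical */

    static OMX_COMPONENTTYPE comp_fns;

    /* Resolved by the core via dlsym(lib_handle, "getInstance"). */
    void *getInstance(void)
    {
      return jpeg_engine_new();
    }

    /* Resolved via dlsym(lib_handle, "create_component_fns"); receives the
     * object returned by getInstance() and returns the populated handle. */
    void *create_component_fns(OMX_PTR aobj)
    {
      jpeg_engine_fill_fns((jpeg_engine_t *)aobj, &comp_fns);
      return &comp_fns;
    }
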