/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of The Linux Foundation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#define LOG_TAG "QCamera3HWI"
//#define LOG_NDEBUG 0
#define __STDC_LIMIT_MACROS
#include <cutils/properties.h>
#include <hardware/camera3.h>
#include <camera/CameraMetadata.h>
#include <stdlib.h>
#include <fcntl.h>
#include <stdint.h>
#include <utils/Log.h>
#include <utils/Errors.h>
#include <sync/sync.h>
#include <gralloc_priv.h>
#include "QCamera3HWI.h"
#include "QCamera3Mem.h"
#include "QCamera3Channel.h"
#include "QCamera3PostProc.h"
#include "QCamera3VendorTags.h"
using namespace android;
namespace qcamera {
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define DATA_PTR(MEM_OBJ,INDEX) MEM_OBJ->getPtr( INDEX )
#define EMPTY_PIPELINE_DELAY 2
#define CAM_MAX_SYNC_LATENCY 4
#define TIMEOUT_NEVER -1
cam_capability_t *gCamCapability[MM_CAMERA_MAX_NUM_SENSORS];
const camera_metadata_t *gStaticMetadata[MM_CAMERA_MAX_NUM_SENSORS];
pthread_mutex_t QCamera3HardwareInterface::mCameraSessionLock =
PTHREAD_MUTEX_INITIALIZER;
unsigned int QCamera3HardwareInterface::mCameraSessionActive = 0;
const QCamera3HardwareInterface::QCameraMap QCamera3HardwareInterface::EFFECT_MODES_MAP[] = {
{ ANDROID_CONTROL_EFFECT_MODE_OFF, CAM_EFFECT_MODE_OFF },
{ ANDROID_CONTROL_EFFECT_MODE_MONO, CAM_EFFECT_MODE_MONO },
{ ANDROID_CONTROL_EFFECT_MODE_NEGATIVE, CAM_EFFECT_MODE_NEGATIVE },
{ ANDROID_CONTROL_EFFECT_MODE_SOLARIZE, CAM_EFFECT_MODE_SOLARIZE },
{ ANDROID_CONTROL_EFFECT_MODE_SEPIA, CAM_EFFECT_MODE_SEPIA },
{ ANDROID_CONTROL_EFFECT_MODE_POSTERIZE, CAM_EFFECT_MODE_POSTERIZE },
{ ANDROID_CONTROL_EFFECT_MODE_WHITEBOARD, CAM_EFFECT_MODE_WHITEBOARD },
{ ANDROID_CONTROL_EFFECT_MODE_BLACKBOARD, CAM_EFFECT_MODE_BLACKBOARD },
{ ANDROID_CONTROL_EFFECT_MODE_AQUA, CAM_EFFECT_MODE_AQUA }
};
const QCamera3HardwareInterface::QCameraMap QCamera3HardwareInterface::WHITE_BALANCE_MODES_MAP[] = {
{ ANDROID_CONTROL_AWB_MODE_OFF, CAM_WB_MODE_OFF },
{ ANDROID_CONTROL_AWB_MODE_AUTO, CAM_WB_MODE_AUTO },
{ ANDROID_CONTROL_AWB_MODE_INCANDESCENT, CAM_WB_MODE_INCANDESCENT },
{ ANDROID_CONTROL_AWB_MODE_FLUORESCENT, CAM_WB_MODE_FLUORESCENT },
{ ANDROID_CONTROL_AWB_MODE_WARM_FLUORESCENT, CAM_WB_MODE_WARM_FLUORESCENT },
{ ANDROID_CONTROL_AWB_MODE_DAYLIGHT, CAM_WB_MODE_DAYLIGHT },
{ ANDROID_CONTROL_AWB_MODE_CLOUDY_DAYLIGHT, CAM_WB_MODE_CLOUDY_DAYLIGHT },
{ ANDROID_CONTROL_AWB_MODE_TWILIGHT, CAM_WB_MODE_TWILIGHT },
{ ANDROID_CONTROL_AWB_MODE_SHADE, CAM_WB_MODE_SHADE }
};
const QCamera3HardwareInterface::QCameraMap QCamera3HardwareInterface::SCENE_MODES_MAP[] = {
{ ANDROID_CONTROL_SCENE_MODE_FACE_PRIORITY, CAM_SCENE_MODE_OFF },
{ ANDROID_CONTROL_SCENE_MODE_ACTION, CAM_SCENE_MODE_ACTION },
{ ANDROID_CONTROL_SCENE_MODE_PORTRAIT, CAM_SCENE_MODE_PORTRAIT },
{ ANDROID_CONTROL_SCENE_MODE_LANDSCAPE, CAM_SCENE_MODE_LANDSCAPE },
{ ANDROID_CONTROL_SCENE_MODE_NIGHT, CAM_SCENE_MODE_NIGHT },
{ ANDROID_CONTROL_SCENE_MODE_NIGHT_PORTRAIT, CAM_SCENE_MODE_NIGHT_PORTRAIT },
{ ANDROID_CONTROL_SCENE_MODE_THEATRE, CAM_SCENE_MODE_THEATRE },
{ ANDROID_CONTROL_SCENE_MODE_BEACH, CAM_SCENE_MODE_BEACH },
{ ANDROID_CONTROL_SCENE_MODE_SNOW, CAM_SCENE_MODE_SNOW },
{ ANDROID_CONTROL_SCENE_MODE_SUNSET, CAM_SCENE_MODE_SUNSET },
{ ANDROID_CONTROL_SCENE_MODE_STEADYPHOTO, CAM_SCENE_MODE_ANTISHAKE },
{ ANDROID_CONTROL_SCENE_MODE_FIREWORKS, CAM_SCENE_MODE_FIREWORKS },
{ ANDROID_CONTROL_SCENE_MODE_SPORTS, CAM_SCENE_MODE_SPORTS },
{ ANDROID_CONTROL_SCENE_MODE_PARTY, CAM_SCENE_MODE_PARTY },
{ ANDROID_CONTROL_SCENE_MODE_CANDLELIGHT, CAM_SCENE_MODE_CANDLELIGHT },
{ ANDROID_CONTROL_SCENE_MODE_BARCODE, CAM_SCENE_MODE_BARCODE}
};
const QCamera3HardwareInterface::QCameraMap QCamera3HardwareInterface::FOCUS_MODES_MAP[] = {
{ ANDROID_CONTROL_AF_MODE_OFF, CAM_FOCUS_MODE_OFF },
{ ANDROID_CONTROL_AF_MODE_OFF, CAM_FOCUS_MODE_FIXED },
{ ANDROID_CONTROL_AF_MODE_AUTO, CAM_FOCUS_MODE_AUTO },
{ ANDROID_CONTROL_AF_MODE_MACRO, CAM_FOCUS_MODE_MACRO },
{ ANDROID_CONTROL_AF_MODE_EDOF, CAM_FOCUS_MODE_EDOF },
{ ANDROID_CONTROL_AF_MODE_CONTINUOUS_PICTURE, CAM_FOCUS_MODE_CONTINOUS_PICTURE },
{ ANDROID_CONTROL_AF_MODE_CONTINUOUS_VIDEO, CAM_FOCUS_MODE_CONTINOUS_VIDEO }
};
const QCamera3HardwareInterface::QCameraMap QCamera3HardwareInterface::ANTIBANDING_MODES_MAP[] = {
{ ANDROID_CONTROL_AE_ANTIBANDING_MODE_OFF, CAM_ANTIBANDING_MODE_OFF },
{ ANDROID_CONTROL_AE_ANTIBANDING_MODE_50HZ, CAM_ANTIBANDING_MODE_50HZ },
{ ANDROID_CONTROL_AE_ANTIBANDING_MODE_60HZ, CAM_ANTIBANDING_MODE_60HZ },
{ ANDROID_CONTROL_AE_ANTIBANDING_MODE_AUTO, CAM_ANTIBANDING_MODE_AUTO }
};
const QCamera3HardwareInterface::QCameraMap QCamera3HardwareInterface::AE_FLASH_MODE_MAP[] = {
{ ANDROID_CONTROL_AE_MODE_OFF, CAM_FLASH_MODE_OFF },
{ ANDROID_CONTROL_AE_MODE_ON, CAM_FLASH_MODE_OFF },
{ ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH, CAM_FLASH_MODE_AUTO},
{ ANDROID_CONTROL_AE_MODE_ON_ALWAYS_FLASH, CAM_FLASH_MODE_ON },
{ ANDROID_CONTROL_AE_MODE_ON_AUTO_FLASH_REDEYE, CAM_FLASH_MODE_AUTO}
};
const QCamera3HardwareInterface::QCameraMap QCamera3HardwareInterface::FLASH_MODES_MAP[] = {
{ ANDROID_FLASH_MODE_OFF, CAM_FLASH_MODE_OFF },
{ ANDROID_FLASH_MODE_SINGLE, CAM_FLASH_MODE_SINGLE },
{ ANDROID_FLASH_MODE_TORCH, CAM_FLASH_MODE_TORCH }
};
const QCamera3HardwareInterface::QCameraMap QCamera3HardwareInterface::FACEDETECT_MODES_MAP[] = {
{ ANDROID_STATISTICS_FACE_DETECT_MODE_OFF, CAM_FACE_DETECT_MODE_OFF },
{ ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE, CAM_FACE_DETECT_MODE_SIMPLE },
{ ANDROID_STATISTICS_FACE_DETECT_MODE_FULL, CAM_FACE_DETECT_MODE_FULL }
};
const QCamera3HardwareInterface::QCameraMap QCamera3HardwareInterface::FOCUS_CALIBRATION_MAP[] = {
{ ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_UNCALIBRATED,
CAM_FOCUS_UNCALIBRATED },
{ ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_APPROXIMATE,
CAM_FOCUS_APPROXIMATE },
{ ANDROID_LENS_INFO_FOCUS_DISTANCE_CALIBRATION_CALIBRATED,
CAM_FOCUS_CALIBRATED }
};
const int32_t available_thumbnail_sizes[] = {0, 0,
176, 144,
320, 240,
432, 288,
480, 288,
512, 288,
512, 384};
const QCamera3HardwareInterface::QCameraMap QCamera3HardwareInterface::TEST_PATTERN_MAP[] = {
{ ANDROID_SENSOR_TEST_PATTERN_MODE_OFF, CAM_TEST_PATTERN_OFF },
{ ANDROID_SENSOR_TEST_PATTERN_MODE_SOLID_COLOR, CAM_TEST_PATTERN_SOLID_COLOR },
{ ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS, CAM_TEST_PATTERN_COLOR_BARS },
{ ANDROID_SENSOR_TEST_PATTERN_MODE_COLOR_BARS_FADE_TO_GRAY, CAM_TEST_PATTERN_COLOR_BARS_FADE_TO_GRAY },
{ ANDROID_SENSOR_TEST_PATTERN_MODE_PN9, CAM_TEST_PATTERN_PN9 },
};
/* Not every Android enum has a HAL mapping, so some Android enums are not
* listed. The order in this list also matters: when mapping from HAL to
* Android, the lookup traverses from lower to higher index, so for HAL
* values that map to multiple Android values the first match wins (see the
* illustrative lookup sketch after the map below).
*/
const QCamera3HardwareInterface::QCameraMap QCamera3HardwareInterface::REFERENCE_ILLUMINANT_MAP[] = {
{ ANDROID_SENSOR_REFERENCE_ILLUMINANT1_FLUORESCENT, CAM_AWB_WARM_FLO},
{ ANDROID_SENSOR_REFERENCE_ILLUMINANT1_DAYLIGHT_FLUORESCENT, CAM_AWB_CUSTOM_DAYLIGHT },
{ ANDROID_SENSOR_REFERENCE_ILLUMINANT1_COOL_WHITE_FLUORESCENT, CAM_AWB_COLD_FLO },
{ ANDROID_SENSOR_REFERENCE_ILLUMINANT1_STANDARD_A, CAM_AWB_A },
{ ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D55, CAM_AWB_NOON },
{ ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D65, CAM_AWB_D65 },
{ ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D75, CAM_AWB_D75 },
{ ANDROID_SENSOR_REFERENCE_ILLUMINANT1_D50, CAM_AWB_D50 },
{ ANDROID_SENSOR_REFERENCE_ILLUMINANT1_ISO_STUDIO_TUNGSTEN, CAM_AWB_CUSTOM_A},
{ ANDROID_SENSOR_REFERENCE_ILLUMINANT1_DAYLIGHT, CAM_AWB_D50 },
{ ANDROID_SENSOR_REFERENCE_ILLUMINANT1_TUNGSTEN, CAM_AWB_A },
{ ANDROID_SENSOR_REFERENCE_ILLUMINANT1_FINE_WEATHER, CAM_AWB_D50 },
{ ANDROID_SENSOR_REFERENCE_ILLUMINANT1_CLOUDY_WEATHER, CAM_AWB_D65 },
{ ANDROID_SENSOR_REFERENCE_ILLUMINANT1_SHADE, CAM_AWB_D75 },
{ ANDROID_SENSOR_REFERENCE_ILLUMINANT1_DAY_WHITE_FLUORESCENT, CAM_AWB_CUSTOM_DAYLIGHT },
{ ANDROID_SENSOR_REFERENCE_ILLUMINANT1_WHITE_FLUORESCENT, CAM_AWB_COLD_FLO},
};
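/* Illustrative sketch (hypothetical helper, assuming QCameraMap pairs a
* framework enum with a HAL enum as fwk_name/hal_name): a HAL-to-Android
* lookup over these tables scans from index 0 upward and returns the first
* match, which is why the ordering above matters when one HAL value maps to
* several Android values:
*
*   static int lookupFwkName(const QCamera3HardwareInterface::QCameraMap *map,
*           int len, int hal_value)
*   {
*       for (int i = 0; i < len; i++) {
*           if (map[i].hal_name == hal_value)
*               return map[i].fwk_name; // first match wins
*       }
*       return NAME_NOT_FOUND;
*   }
*/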
/* HAL device ops table: static entry points exposed to the camera framework */
camera3_device_ops_t QCamera3HardwareInterface::mCameraOps = {
.initialize = QCamera3HardwareInterface::initialize,
.configure_streams = QCamera3HardwareInterface::configure_streams,
.register_stream_buffers = NULL,
.construct_default_request_settings = QCamera3HardwareInterface::construct_default_request_settings,
.process_capture_request = QCamera3HardwareInterface::process_capture_request,
.get_metadata_vendor_tag_ops = NULL,
.dump = QCamera3HardwareInterface::dump,
.flush = QCamera3HardwareInterface::flush,
.reserved = {0},
};
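/* Illustrative sketch (hypothetical caller, for orientation only): the
* framework obtains a camera3_device_t from the module's open() call and
* drives the HAL through this static ops table, roughly:
*
*   camera3_device_t *dev; // filled in by openCamera(hw_device_t **)
*   dev->ops->initialize(dev, callback_ops);
*   dev->ops->configure_streams(dev, &stream_config);
*   dev->ops->process_capture_request(dev, &request);
*
* Each static entry point recovers the instance from dev->priv, which the
* constructor below sets to `this`.
*/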
int QCamera3HardwareInterface::kMaxInFlight = 5;
/*===========================================================================
* FUNCTION : QCamera3HardwareInterface
*
* DESCRIPTION: constructor of QCamera3HardwareInterface
*
* PARAMETERS :
* @cameraId : camera ID
*
* RETURN : none
*==========================================================================*/
QCamera3HardwareInterface::QCamera3HardwareInterface(int cameraId,
const camera_module_callbacks_t *callbacks)
: mCameraId(cameraId),
mCameraHandle(NULL),
mCameraOpened(false),
mCameraInitialized(false),
mCallbackOps(NULL),
mInputStream(NULL),
mMetadataChannel(NULL),
mPictureChannel(NULL),
mRawChannel(NULL),
mSupportChannel(NULL),
mFirstRequest(false),
mRepeatingRequest(false),
mParamHeap(NULL),
mParameters(NULL),
mPrevParameters(NULL),
mLoopBackResult(NULL),
mFlush(false),
mMinProcessedFrameDuration(0),
mMinJpegFrameDuration(0),
mMinRawFrameDuration(0),
m_pPowerModule(NULL),
mHdrHint(false),
mMetaFrameCount(0),
mCallbacks(callbacks)
{
mCameraDevice.common.tag = HARDWARE_DEVICE_TAG;
mCameraDevice.common.version = CAMERA_DEVICE_API_VERSION_3_2;
mCameraDevice.common.close = close_camera_device;
mCameraDevice.ops = &mCameraOps;
mCameraDevice.priv = this;
gCamCapability[cameraId]->version = CAM_HAL_V3;
// TODO: hardcode for now until mctl adds support for min_num_pp_bufs
// TBD: verify whether this hardcoding is still needed, e.g. by checking
// whether mctl already fills this in as 3
gCamCapability[cameraId]->min_num_pp_bufs = 3;
pthread_cond_init(&mRequestCond, NULL);
mPendingRequest = 0;
mCurrentRequestId = -1;
pthread_mutex_init(&mMutex, NULL);
for (size_t i = 0; i < CAMERA3_TEMPLATE_COUNT; i++)
mDefaultMetadata[i] = NULL;
#ifdef HAS_MULTIMEDIA_HINTS
if (hw_get_module(POWER_HARDWARE_MODULE_ID, (const hw_module_t **)&m_pPowerModule)) {
ALOGE("%s: %s module not found", __func__, POWER_HARDWARE_MODULE_ID);
}
#endif
}
/*===========================================================================
* FUNCTION : ~QCamera3HardwareInterface
*
* DESCRIPTION: destructor of QCamera3HardwareInterface
*
* PARAMETERS : none
*
* RETURN : none
*==========================================================================*/
QCamera3HardwareInterface::~QCamera3HardwareInterface()
{
ALOGV("%s: E", __func__);
/* We need to stop all streams before deleting any stream */
// NOTE: 'camera3_stream_t *' objects are already freed at
// this stage by the framework
for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
it != mStreamInfo.end(); it++) {
QCamera3Channel *channel = (*it)->channel;
if (channel) {
channel->stop();
}
}
if (mSupportChannel)
mSupportChannel->stop();
for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
it != mStreamInfo.end(); it++) {
QCamera3Channel *channel = (*it)->channel;
if (channel)
delete channel;
free (*it);
}
if (mSupportChannel) {
delete mSupportChannel;
mSupportChannel = NULL;
}
mPictureChannel = NULL;
/* Clean up all channels */
if (mCameraInitialized) {
if (mMetadataChannel) {
mMetadataChannel->stop();
delete mMetadataChannel;
mMetadataChannel = NULL;
}
deinitParameters();
}
if (mCameraOpened)
closeCamera();
mPendingBuffersMap.mPendingBufferList.clear();
mPendingRequestsList.clear();
for (size_t i = 0; i < CAMERA3_TEMPLATE_COUNT; i++)
if (mDefaultMetadata[i])
free_camera_metadata(mDefaultMetadata[i]);
pthread_cond_destroy(&mRequestCond);
pthread_mutex_destroy(&mMutex);
ALOGV("%s: X", __func__);
}
/*===========================================================================
* FUNCTION : openCamera
*
* DESCRIPTION: open camera
*
* PARAMETERS :
* @hw_device : double ptr for camera device struct
*
* RETURN : int32_t type of status
* NO_ERROR -- success
* non-zero failure code
*==========================================================================*/
int QCamera3HardwareInterface::openCamera(struct hw_device_t **hw_device)
{
int rc = 0;
pthread_mutex_lock(&mCameraSessionLock);
if (mCameraSessionActive) {
ALOGE("%s: multiple simultaneous camera instance not supported", __func__);
pthread_mutex_unlock(&mCameraSessionLock);
return -EUSERS;
}
if (mCameraOpened) {
*hw_device = NULL;
pthread_mutex_unlock(&mCameraSessionLock);
return PERMISSION_DENIED;
}
rc = openCamera();
if (rc == 0) {
*hw_device = &mCameraDevice.common;
mCameraSessionActive = 1;
} else
*hw_device = NULL;
#ifdef HAS_MULTIMEDIA_HINTS
if (rc == 0) {
if (m_pPowerModule) {
if (m_pPowerModule->powerHint) {
m_pPowerModule->powerHint(m_pPowerModule, POWER_HINT_VIDEO_ENCODE,
(void *)"state=1");
}
}
}
#endif
pthread_mutex_unlock(&mCameraSessionLock);
return rc;
}
/*===========================================================================
* FUNCTION : openCamera
*
* DESCRIPTION: open camera
*
* PARAMETERS : none
*
* RETURN : int32_t type of status
* NO_ERROR -- success
* non-zero failure code
*==========================================================================*/
int QCamera3HardwareInterface::openCamera()
{
if (mCameraHandle) {
ALOGE("Failure: Camera already opened");
return ALREADY_EXISTS;
}
mCameraHandle = camera_open(mCameraId);
if (!mCameraHandle) {
ALOGE("camera_open failed.");
return UNKNOWN_ERROR;
}
mCameraOpened = true;
return NO_ERROR;
}
/*===========================================================================
* FUNCTION : closeCamera
*
* DESCRIPTION: close camera
*
* PARAMETERS : none
*
* RETURN : int32_t type of status
* NO_ERROR -- success
* non-zero failure code
*==========================================================================*/
int QCamera3HardwareInterface::closeCamera()
{
int rc = NO_ERROR;
rc = mCameraHandle->ops->close_camera(mCameraHandle->camera_handle);
mCameraHandle = NULL;
mCameraOpened = false;
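/* Note on the power hint strings (semantics inferred from their usage in
* this file, not from a power HAL spec): "state=1"/"state=0" appear to
* bracket the camera session (sent from openCamera() and here), while
* "state=2"/"state=3" bracket the callback-stream hint tracked by
* mHdrHint. */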
#ifdef HAS_MULTIMEDIA_HINTS
if (rc == NO_ERROR) {
if (m_pPowerModule) {
if (m_pPowerModule->powerHint) {
if(mHdrHint == true) {
m_pPowerModule->powerHint(m_pPowerModule, POWER_HINT_VIDEO_ENCODE,
(void *)"state=3");
mHdrHint = false;
}
m_pPowerModule->powerHint(m_pPowerModule, POWER_HINT_VIDEO_ENCODE,
(void *)"state=0");
}
}
}
#endif
return rc;
}
/*===========================================================================
* FUNCTION : initialize
*
* DESCRIPTION: Initialize frameworks callback functions
*
* PARAMETERS :
* @callback_ops : callback function to frameworks
*
* RETURN : int32_t type of status
* NO_ERROR -- success
* non-zero failure code
*
*==========================================================================*/
int QCamera3HardwareInterface::initialize(
const struct camera3_callback_ops *callback_ops)
{
int rc;
pthread_mutex_lock(&mMutex);
rc = initParameters();
if (rc < 0) {
ALOGE("%s: initParamters failed %d", __func__, rc);
goto err1;
}
mCallbackOps = callback_ops;
pthread_mutex_unlock(&mMutex);
mCameraInitialized = true;
return 0;
err1:
pthread_mutex_unlock(&mMutex);
return rc;
}
/*===========================================================================
* FUNCTION : validateStreamDimensions
*
* DESCRIPTION: Check whether the requested stream dimensions are among those advertised
*
* PARAMETERS :
* @stream_list : streams to be configured
*
* RETURN : int32_t type of status
* NO_ERROR -- success
* -EINVAL -- an unsupported stream size was requested
*
*==========================================================================*/
int QCamera3HardwareInterface::validateStreamDimensions(
camera3_stream_configuration_t *streamList)
{
int rc = NO_ERROR;
/*
* Loop through all streams requested in the configuration and check
* whether an unsupported size has been requested on any of them.
*/
for (size_t j = 0; j < streamList->num_streams; j++){
bool sizeFound = false;
camera3_stream_t *newStream = streamList->streams[j];
/*
* Supported sizes differ per stream format; check against the
* appropriate table.
*/
switch (newStream->format) {
case ANDROID_SCALER_AVAILABLE_FORMATS_RAW16:
case ANDROID_SCALER_AVAILABLE_FORMATS_RAW_OPAQUE:
case HAL_PIXEL_FORMAT_RAW10:
for (int i = 0;
i < gCamCapability[mCameraId]->supported_raw_dim_cnt; i++){
if (gCamCapability[mCameraId]->raw_dim[i].width
== (int32_t) newStream->width
&& gCamCapability[mCameraId]->raw_dim[i].height
== (int32_t) newStream->height) {
sizeFound = true;
}
}
break;
case HAL_PIXEL_FORMAT_BLOB:
for (int i = 0;
i < gCamCapability[mCameraId]->picture_sizes_tbl_cnt;i++){
if ((int32_t)(newStream->width) ==
gCamCapability[mCameraId]
->picture_sizes_tbl[i].width
&& (int32_t)(newStream->height) ==
gCamCapability[mCameraId]
->picture_sizes_tbl[i].height){
sizeFound = true;
break;
}
}
break;
case HAL_PIXEL_FORMAT_YCbCr_420_888:
case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
default:
/* A ZSL stream should be full active array size; validate that */
if (newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL) {
if ((int32_t)(newStream->width) ==
gCamCapability[mCameraId]->active_array_size.width
&& (int32_t)(newStream->height) ==
gCamCapability[mCameraId]->active_array_size.height) {
sizeFound = true;
}
/* We could break here to enforce that a framework-set ZSL stream
* always has full active array size, but the spec is unclear on
* whether the framework will always do so, and we already have
* logic to override to the full array size, so keep this check
* lenient for now.
*/
}
/* Non-ZSL streams still need to conform to advertised sizes */
for (int i = 0;
i < gCamCapability[mCameraId]->picture_sizes_tbl_cnt;i++){
if ((int32_t)(newStream->width) ==
gCamCapability[mCameraId]
->picture_sizes_tbl[i].width
&& (int32_t)(newStream->height) ==
gCamCapability[mCameraId]
->picture_sizes_tbl[i].height){
sizeFound = true;
break;
}
}
break;
} /* End of switch(newStream->format) */
/* We error out if even a single stream has an unsupported size */
if (!sizeFound) {
ALOGE("%s: Error: Unsupported size of %d x %d requested for stream "
"format %d", __func__, newStream->width, newStream->height,
newStream->format);
rc = -EINVAL;
break;
}
} /* End of for each stream */
return rc;
}
/*===========================================================================
* FUNCTION : configureStreams
*
* DESCRIPTION: Reset HAL camera device processing pipeline and set up new input
* and output streams.
*
* PARAMETERS :
* @stream_list : streams to be configured
*
* RETURN : int32_t type of status
* NO_ERROR -- success
* non-zero failure code
*
*==========================================================================*/
int QCamera3HardwareInterface::configureStreams(
camera3_stream_configuration_t *streamList)
{
int rc = 0;
// Sanity check stream_list
if (streamList == NULL) {
ALOGE("%s: NULL stream configuration", __func__);
return BAD_VALUE;
}
if (streamList->streams == NULL) {
ALOGE("%s: NULL stream list", __func__);
return BAD_VALUE;
}
if (streamList->num_streams < 1) {
ALOGE("%s: Bad number of streams requested: %d", __func__,
streamList->num_streams);
return BAD_VALUE;
}
rc = validateStreamDimensions(streamList);
if (rc != NO_ERROR) {
ALOGE("%s: Invalid stream configuration requested!", __func__);
return rc;
}
/* First invalidate all the streams in mStreamInfo;
* if they appear again, they will be re-validated */
for (List<stream_info_t*>::iterator it = mStreamInfo.begin();
it != mStreamInfo.end(); it++) {
QCamera3Channel *channel = (QCamera3Channel*)(*it)->stream->priv;
channel->stop();
(*it)->status = INVALID;
}
if (mSupportChannel)
mSupportChannel->stop();
if (mMetadataChannel) {
/* If mStreamInfo is non-empty, a metadata stream already exists */
mMetadataChannel->stop();
}
#ifdef HAS_MULTIMEDIA_HINTS
if(mHdrHint == true) {
if (m_pPowerModule) {
if (m_pPowerModule->powerHint) {
m_pPowerModule->powerHint(m_pPowerModule, POWER_HINT_VIDEO_ENCODE,
(void *)"state=3");
mHdrHint = false;
}
}
}
#endif
pthread_mutex_lock(&mMutex);
bool isZsl = false;
camera3_stream_t *inputStream = NULL;
camera3_stream_t *jpegStream = NULL;
cam_stream_size_info_t stream_config_info;
for (size_t i = 0; i < streamList->num_streams; i++) {
camera3_stream_t *newStream = streamList->streams[i];
ALOGD("%s: newStream type = %d, stream format = %d stream size : %d x %d",
__func__, newStream->stream_type, newStream->format,
newStream->width, newStream->height);
//if the stream already exists in mStreamInfo, mark it valid
bool stream_exists = false;
for (List<stream_info_t*>::iterator it=mStreamInfo.begin();
it != mStreamInfo.end(); it++) {
if ((*it)->stream == newStream) {
QCamera3Channel *channel =
(QCamera3Channel*)(*it)->stream->priv;
stream_exists = true;
delete channel;
(*it)->status = VALID;
(*it)->stream->priv = NULL;
(*it)->channel = NULL;
}
}
if (!stream_exists) {
//new stream
stream_info_t* stream_info;
stream_info = (stream_info_t* )malloc(sizeof(stream_info_t));
if (stream_info == NULL) {
ALOGE("%s: Could not allocate stream info", __func__);
pthread_mutex_unlock(&mMutex);
return -ENOMEM;
}
stream_info->stream = newStream;
stream_info->status = VALID;
stream_info->channel = NULL;
mStreamInfo.push_back(stream_info);
}
if (newStream->stream_type == CAMERA3_STREAM_INPUT
|| newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL ) {
if (inputStream != NULL) {
ALOGE("%s: Multiple input streams requested!", __func__);
pthread_mutex_unlock(&mMutex);
return BAD_VALUE;
}
inputStream = newStream;
}
if (newStream->format == HAL_PIXEL_FORMAT_BLOB) {
jpegStream = newStream;
}
}
mInputStream = inputStream;
cleanAndSortStreamInfo();
if (mMetadataChannel) {
delete mMetadataChannel;
mMetadataChannel = NULL;
}
if (mSupportChannel) {
delete mSupportChannel;
mSupportChannel = NULL;
}
//Create metadata channel and initialize it
mMetadataChannel = new QCamera3MetadataChannel(mCameraHandle->camera_handle,
mCameraHandle->ops, captureResultCb,
&gCamCapability[mCameraId]->padding_info, this);
if (mMetadataChannel == NULL) {
ALOGE("%s: failed to allocate metadata channel", __func__);
rc = -ENOMEM;
pthread_mutex_unlock(&mMutex);
return rc;
}
rc = mMetadataChannel->initialize();
if (rc < 0) {
ALOGE("%s: metadata channel initialization failed", __func__);
delete mMetadataChannel;
mMetadataChannel = NULL;
pthread_mutex_unlock(&mMutex);
return rc;
}
/* Create a dummy support stream if the only configured stream is raw */
if (streamList->num_streams == 1 &&
(streamList->streams[0]->format == HAL_PIXEL_FORMAT_RAW_OPAQUE ||
streamList->streams[0]->format == HAL_PIXEL_FORMAT_RAW16)) {
mSupportChannel = new QCamera3SupportChannel(
mCameraHandle->camera_handle,
mCameraHandle->ops,
&gCamCapability[mCameraId]->padding_info,
this);
if (!mSupportChannel) {
ALOGE("%s: dummy channel cannot be created", __func__);
pthread_mutex_unlock(&mMutex);
return -ENOMEM;
}
}
/* Allocate channel objects for the requested streams */
for (size_t i = 0; i < streamList->num_streams; i++) {
camera3_stream_t *newStream = streamList->streams[i];
uint32_t stream_usage = newStream->usage;
stream_config_info.stream_sizes[i].width = newStream->width;
stream_config_info.stream_sizes[i].height = newStream->height;
if (newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL &&
newStream->format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED && jpegStream){
//for zsl stream the size is active array size
isZsl = true;
stream_config_info.stream_sizes[i].width =
gCamCapability[mCameraId]->active_array_size.width;
stream_config_info.stream_sizes[i].height =
gCamCapability[mCameraId]->active_array_size.height;
stream_config_info.type[i] = CAM_STREAM_TYPE_SNAPSHOT;
} else {
//for non zsl streams find out the format
switch (newStream->format) {
case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED :
{
if (stream_usage & private_handle_t::PRIV_FLAGS_VIDEO_ENCODER) {
stream_config_info.type[i] = CAM_STREAM_TYPE_VIDEO;
} else {
stream_config_info.type[i] = CAM_STREAM_TYPE_PREVIEW;
}
}
break;
case HAL_PIXEL_FORMAT_YCbCr_420_888:
stream_config_info.type[i] = CAM_STREAM_TYPE_CALLBACK;
#ifdef HAS_MULTIMEDIA_HINTS
if (m_pPowerModule) {
if (m_pPowerModule->powerHint) {
m_pPowerModule->powerHint(m_pPowerModule,
POWER_HINT_VIDEO_ENCODE, (void *)"state=2");
mHdrHint = true;
}
}
#endif
break;
case HAL_PIXEL_FORMAT_BLOB:
stream_config_info.type[i] = CAM_STREAM_TYPE_NON_ZSL_SNAPSHOT;
break;
case HAL_PIXEL_FORMAT_RAW_OPAQUE:
case HAL_PIXEL_FORMAT_RAW16:
stream_config_info.type[i] = CAM_STREAM_TYPE_RAW;
break;
default:
stream_config_info.type[i] = CAM_STREAM_TYPE_DEFAULT;
break;
}
}
if (newStream->priv == NULL) {
//New stream, construct channel
switch (newStream->stream_type) {
case CAMERA3_STREAM_INPUT:
newStream->usage = GRALLOC_USAGE_HW_CAMERA_READ;
break;
case CAMERA3_STREAM_BIDIRECTIONAL:
newStream->usage = GRALLOC_USAGE_HW_CAMERA_READ |
GRALLOC_USAGE_HW_CAMERA_WRITE;
break;
case CAMERA3_STREAM_OUTPUT:
/* For a video encoder stream, set the read/write-rarely
* flags so the buffers may be allocated un-cached */
if (newStream->usage & GRALLOC_USAGE_HW_VIDEO_ENCODER)
newStream->usage =
(GRALLOC_USAGE_SW_READ_RARELY |
GRALLOC_USAGE_SW_WRITE_RARELY |
GRALLOC_USAGE_HW_CAMERA_WRITE);
else
newStream->usage = GRALLOC_USAGE_HW_CAMERA_WRITE;
break;
default:
ALOGE("%s: Invalid stream_type %d", __func__, newStream->stream_type);
break;
}
if (newStream->stream_type == CAMERA3_STREAM_OUTPUT ||
newStream->stream_type == CAMERA3_STREAM_BIDIRECTIONAL) {
QCamera3Channel *channel = NULL;
switch (newStream->format) {
case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
case HAL_PIXEL_FORMAT_YCbCr_420_888:
newStream->max_buffers = QCamera3RegularChannel::kMaxBuffers;
channel = new QCamera3RegularChannel(mCameraHandle->camera_handle,
mCameraHandle->ops, captureResultCb,
&gCamCapability[mCameraId]->padding_info,
this,
newStream,
(cam_stream_type_t) stream_config_info.type[i]);
if (channel == NULL) {
ALOGE("%s: allocation of channel failed", __func__);
pthread_mutex_unlock(&mMutex);
return -ENOMEM;
}
newStream->priv = channel;
break;
case HAL_PIXEL_FORMAT_RAW_OPAQUE:
case HAL_PIXEL_FORMAT_RAW16:
newStream->max_buffers = QCamera3RawChannel::kMaxBuffers;
mRawChannel = new QCamera3RawChannel(
mCameraHandle->camera_handle,
mCameraHandle->ops, captureResultCb,
&gCamCapability[mCameraId]->padding_info,
this, newStream, (newStream->format == HAL_PIXEL_FORMAT_RAW16));
if (mRawChannel == NULL) {
ALOGE("%s: allocation of raw channel failed", __func__);
pthread_mutex_unlock(&mMutex);
return -ENOMEM;
}
newStream->priv = (QCamera3Channel*)mRawChannel;
break;
case HAL_PIXEL_FORMAT_BLOB:
newStream->max_buffers = QCamera3PicChannel::kMaxBuffers;
mPictureChannel = new QCamera3PicChannel(mCameraHandle->camera_handle,
mCameraHandle->ops, captureResultCb,
&gCamCapability[mCameraId]->padding_info, this, newStream);
if (mPictureChannel == NULL) {
ALOGE("%s: allocation of channel failed", __func__);
pthread_mutex_unlock(&mMutex);
return -ENOMEM;
}
newStream->priv = (QCamera3Channel*)mPictureChannel;
break;
default:
ALOGE("%s: not a supported format 0x%x", __func__, newStream->format);
break;
}
}
for (List<stream_info_t*>::iterator it=mStreamInfo.begin();
it != mStreamInfo.end(); it++) {
if ((*it)->stream == newStream) {
(*it)->channel = (QCamera3Channel*) newStream->priv;
break;
}
}
} else {
// Channel already exists for this stream
// Do nothing for now
}
}
if (isZsl)
mPictureChannel->overrideYuvSize(
gCamCapability[mCameraId]->active_array_size.width,
gCamCapability[mCameraId]->active_array_size.height);
int32_t hal_version = CAM_HAL_V3;
stream_config_info.num_streams = streamList->num_streams;
if (mSupportChannel) {
stream_config_info.stream_sizes[stream_config_info.num_streams] =
QCamera3SupportChannel::kDim;
stream_config_info.type[stream_config_info.num_streams] =
CAM_STREAM_TYPE_CALLBACK;
stream_config_info.num_streams++;
}
// settings/parameters don't carry over for new configureStreams
memset(mParameters, 0, sizeof(metadata_buffer_t));
mParameters->first_flagged_entry = CAM_INTF_PARM_MAX;
AddSetMetaEntryToBatch(mParameters, CAM_INTF_PARM_HAL_VERSION,
sizeof(hal_version), &hal_version);
AddSetMetaEntryToBatch(mParameters, CAM_INTF_META_STREAM_INFO,
sizeof(stream_config_info), &stream_config_info);
mCameraHandle->ops->set_parms(mCameraHandle->camera_handle, mParameters);
/* Initialize mPendingRequestsList and mPendingBuffersMap */
mPendingRequestsList.clear();
mPendingFrameDropList.clear();
// Initialize/Reset the pending buffers list
mPendingBuffersMap.num_buffers = 0;
mPendingBuffersMap.mPendingBufferList.clear();
mFirstRequest = true;
//Get min frame durations for this stream configuration
deriveMinFrameDuration();
pthread_mutex_unlock(&mMutex);
return rc;
}
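/* Illustrative sketch (hypothetical values, not taken from this HAL): a
* minimal framework-side configuration that would reach configureStreams()
* with a single preview stream could look like:
*
*   camera3_stream_t preview = {};
*   preview.stream_type = CAMERA3_STREAM_OUTPUT;
*   preview.width = 1280;
*   preview.height = 720;
*   preview.format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
*
*   camera3_stream_t *streams[] = { &preview };
*   camera3_stream_configuration_t config = {};
*   config.num_streams = 1;
*   config.streams = streams;
*   dev->ops->configure_streams(dev, &config);
*
* With no video-encoder usage flag set, the loop above would classify this
* stream as CAM_STREAM_TYPE_PREVIEW and create a QCamera3RegularChannel
* for it.
*/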
/*===========================================================================
* FUNCTION : validateCaptureRequest
*
* DESCRIPTION: validate a capture request from camera service
*
* PARAMETERS :
* @request : request from framework to process
*
* RETURN : int32_t type of status
* NO_ERROR -- success
* BAD_VALUE -- invalid capture request
*
*==========================================================================*/
int QCamera3HardwareInterface::validateCaptureRequest(
camera3_capture_request_t *request)
{
ssize_t idx = 0;
const camera3_stream_buffer_t *b;
CameraMetadata meta;
/* Sanity check the request */
if (request == NULL) {
ALOGE("%s: NULL capture request", __func__);
return BAD_VALUE;
}
if (request->settings == NULL && mFirstRequest) {
/*settings cannot be null for the first request*/
return BAD_VALUE;
}
uint32_t frameNumber = request->frame_number;
if (request->input_buffer != NULL &&
request->input_buffer->stream != mInputStream) {
ALOGE("%s: Request %d: Input buffer not from input stream!",
__FUNCTION__, frameNumber);
return BAD_VALUE;
}
if (request->num_output_buffers < 1 || request->output_buffers == NULL) {
ALOGE("%s: Request %d: No output buffers provided!",
__FUNCTION__, frameNumber);
return BAD_VALUE;
}
if (request->input_buffer != NULL) {
b = request->input_buffer;
QCamera3Channel *channel =
static_cast<QCamera3Channel*>(b->stream->priv);
if (channel == NULL) {
ALOGE("%s: Request %d: Buffer %d: Unconfigured stream!",
__func__, frameNumber, idx);
return BAD_VALUE;
}
if (b->status != CAMERA3_BUFFER_STATUS_OK) {
ALOGE("%s: Request %d: Buffer %d: Status not OK!",
__func__, frameNumber, idx);
return BAD_VALUE;
}
if (b->release_fence != -1) {
ALOGE("%s: Request %d: Buffer %d: Has a release fence!",
__func__, frameNumber, idx);
return BAD_VALUE;
}
if (b->buffer == NULL) {
ALOGE("%s: Request %d: Buffer %d: NULL buffer handle!",
__func__, frameNumber, idx);
return BAD_VALUE;
}
}
// Validate all buffers
b = request->output_buffers;
do {
QCamera3Channel *channel =
static_cast<QCamera3Channel*>(b->stream->priv);
if (channel == NULL) {
ALOGE("%s: Request %d: Buffer %d: Unconfigured stream!",
__func__, frameNumber, idx);
return BAD_VALUE;
}
if (b->status != CAMERA3_BUFFER_STATUS_OK) {
ALOGE("%s: Request %d: Buffer %d: Status not OK!",
__func__, frameNumber, idx);
return BAD_VALUE;
}
if (b->release_fence != -1) {
ALOGE("%s: Request %d: Buffer %d: Has a release fence!",
__func__, frameNumber, idx);
return BAD_VALUE;
}
if (b->buffer == NULL) {
ALOGE("%s: Request %d: Buffer %d: NULL buffer handle!",
__func__, frameNumber, idx);
return BAD_VALUE;
}
idx++;
b = request->output_buffers + idx;
} while (idx < (ssize_t)request->num_output_buffers);
return NO_ERROR;
}
/*===========================================================================
* FUNCTION : deriveMinFrameDuration
*
* DESCRIPTION: derive minimum processed, jpeg, and raw frame durations based
* on currently configured streams.
*
* PARAMETERS : NONE
*
* RETURN : NONE
*
*==========================================================================*/
void QCamera3HardwareInterface::deriveMinFrameDuration()
{
int32_t maxJpegDim, maxProcessedDim, maxRawDim;
maxJpegDim = 0;
maxProcessedDim = 0;
maxRawDim = 0;
// Figure out maximum jpeg, processed, and raw dimensions
for (List<stream_info_t*>::iterator it = mStreamInfo.begin();
it != mStreamInfo.end(); it++) {
// Input stream doesn't have valid stream_type
if ((*it)->stream->stream_type == CAMERA3_STREAM_INPUT)
continue;
int32_t dimension = (*it)->stream->width * (*it)->stream->height;
if ((*it)->stream->format == HAL_PIXEL_FORMAT_BLOB) {
if (dimension > maxJpegDim)
maxJpegDim = dimension;
} else if ((*it)->stream->format == HAL_PIXEL_FORMAT_RAW_OPAQUE ||
(*it)->stream->format == HAL_PIXEL_FORMAT_RAW16) {
if (dimension > maxRawDim)
maxRawDim = dimension;
} else {
if (dimension > maxProcessedDim)
maxProcessedDim = dimension;
}
}
//Assume all jpeg dimensions are in processed dimensions.
if (maxJpegDim > maxProcessedDim)
maxProcessedDim = maxJpegDim;
//Find the smallest raw dimension that is greater than or equal to the max processed dimension
if (maxProcessedDim > maxRawDim) {
maxRawDim = INT32_MAX;
for (int i = 0; i < gCamCapability[mCameraId]->supported_raw_dim_cnt;
i++) {
int32_t dimension =
gCamCapability[mCameraId]->raw_dim[i].width *
gCamCapability[mCameraId]->raw_dim[i].height;
if (dimension >= maxProcessedDim && dimension < maxRawDim)
maxRawDim = dimension;
}
}
//Find minimum durations for processed, jpeg, and raw
for (int i = 0; i < gCamCapability[mCameraId]->supported_raw_dim_cnt;
i++) {
if (maxRawDim == gCamCapability[mCameraId]->raw_dim[i].width *
gCamCapability[mCameraId]->raw_dim[i].height) {
mMinRawFrameDuration = gCamCapability[mCameraId]->raw_min_duration[i];
break;
}
}
for (int i = 0; i < gCamCapability[mCameraId]->picture_sizes_tbl_cnt; i++) {
if (maxProcessedDim ==
gCamCapability[mCameraId]->picture_sizes_tbl[i].width *
gCamCapability[mCameraId]->picture_sizes_tbl[i].height) {
mMinProcessedFrameDuration = gCamCapability[mCameraId]->picture_min_duration[i];
mMinJpegFrameDuration = gCamCapability[mCameraId]->picture_min_duration[i];
break;
}
}
}
/*===========================================================================
* FUNCTION : getMinFrameDuration
*
* DESCRIPTION: get the minimum frame duration based on the minimum durations
* derived for the current stream configuration and the current
* request's output buffers.
*
* PARAMETERS : @request: request sent by the framework
*
* RETURN : min frame duration for a particular request
*
*==========================================================================*/
int64_t QCamera3HardwareInterface::getMinFrameDuration(const camera3_capture_request_t *request)
{
bool hasJpegStream = false;
bool hasRawStream __unused = false;
for (uint32_t i = 0; i < request->num_output_buffers; i ++) {
const camera3_stream_t *stream = request->output_buffers[i].stream;
if (stream->format == HAL_PIXEL_FORMAT_BLOB)
hasJpegStream = true;
else if (stream->format == HAL_PIXEL_FORMAT_RAW_OPAQUE ||
stream->format == HAL_PIXEL_FORMAT_RAW16)
hasRawStream = true;
}
if (!hasJpegStream)
return MAX(mMinRawFrameDuration, mMinProcessedFrameDuration);
else
return MAX(MAX(mMinRawFrameDuration, mMinProcessedFrameDuration), mMinJpegFrameDuration);
}
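/* Worked example (illustrative numbers only): if deriveMinFrameDuration()
* left mMinProcessedFrameDuration = 33333333ns (~30fps),
* mMinRawFrameDuration = 33333333ns and mMinJpegFrameDuration = 50000000ns
* (~20fps), then a request without a BLOB buffer yields
* MAX(33333333, 33333333) = 33333333ns, while a request that includes a
* BLOB buffer yields MAX(33333333, 33333333, 50000000) = 50000000ns. */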
/*===========================================================================
* FUNCTION : handleMetadataWithLock
*
* DESCRIPTION: Handles metadata buffer callback with mMutex lock held.
*
* PARAMETERS : @metadata_buf: metadata buffer
*
* RETURN : none
*
*==========================================================================*/
void QCamera3HardwareInterface::handleMetadataWithLock(
mm_camera_super_buf_t *metadata_buf)
{
metadata_buffer_t *metadata = (metadata_buffer_t *)metadata_buf->bufs[0]->buffer;
int32_t frame_number_valid = *(int32_t *)
POINTER_OF(CAM_INTF_META_FRAME_NUMBER_VALID, metadata);
uint32_t pending_requests = *(uint32_t *)POINTER_OF(
CAM_INTF_META_PENDING_REQUESTS, metadata);
uint32_t frame_number = *(uint32_t *)
POINTER_OF(CAM_INTF_META_FRAME_NUMBER, metadata);
const struct timeval *tv = (const struct timeval *)
POINTER_OF(CAM_INTF_META_SENSOR_TIMESTAMP, metadata);
nsecs_t capture_time = (nsecs_t)tv->tv_sec * NSEC_PER_SEC +
tv->tv_usec * NSEC_PER_USEC;
cam_frame_dropped_t cam_frame_drop = *(cam_frame_dropped_t *)
POINTER_OF(CAM_INTF_META_FRAME_DROPPED, metadata);
int32_t urgent_frame_number_valid = *(int32_t *)
POINTER_OF(CAM_INTF_META_URGENT_FRAME_NUMBER_VALID, metadata);
uint32_t urgent_frame_number = *(uint32_t *)
POINTER_OF(CAM_INTF_META_URGENT_FRAME_NUMBER, metadata);
if (urgent_frame_number_valid) {
ALOGV("%s: valid urgent frame_number = %d, capture_time = %lld",
__func__, urgent_frame_number, capture_time);
//Received an urgent frame number, handle it
//using partial results
for (List<PendingRequestInfo>::iterator i =
mPendingRequestsList.begin(); i != mPendingRequestsList.end(); i++) {
camera3_notify_msg_t notify_msg;
ALOGV("%s: Iterator Frame = %d urgent frame = %d",
__func__, i->frame_number, urgent_frame_number);
if (i->frame_number < urgent_frame_number &&
i->bNotified == 0) {
notify_msg.type = CAMERA3_MSG_SHUTTER;
notify_msg.message.shutter.frame_number = i->frame_number;
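// For older requests that were never notified, back-estimate the
// shutter timestamp by assuming roughly one 30fps frame interval
// (NSEC_PER_33MSEC) between consecutive frame numbers.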
notify_msg.message.shutter.timestamp = capture_time -
(urgent_frame_number - i->frame_number) * NSEC_PER_33MSEC;
mCallbackOps->notify(mCallbackOps, &notify_msg);
i->timestamp = notify_msg.message.shutter.timestamp;
i->bNotified = 1;
ALOGV("%s: Support notification !!!! notify frame_number = %d, capture_time = %lld",
__func__, i->frame_number, notify_msg.message.shutter.timestamp);
}
if (i->frame_number == urgent_frame_number) {
camera3_capture_result_t result;
memset(&result, 0, sizeof(camera3_capture_result_t));
// Send shutter notify to frameworks
notify_msg.type = CAMERA3_MSG_SHUTTER;
notify_msg.message.shutter.frame_number = i->frame_number;
notify_msg.message.shutter.timestamp = capture_time;
mCallbackOps->notify(mCallbackOps, &notify_msg);
i->timestamp = capture_time;
i->bNotified = 1;
i->partial_result_cnt++;
// Extract 3A metadata
result.result =
translateCbUrgentMetadataToResultMetadata(metadata);
// Populate metadata result
result.frame_number = urgent_frame_number;
result.num_output_buffers = 0;
result.output_buffers = NULL;
result.partial_result = i->partial_result_cnt;
mCallbackOps->process_capture_result(mCallbackOps, &result);
ALOGV("%s: urgent frame_number = %d, capture_time = %lld",
__func__, result.frame_number, capture_time);
free_camera_metadata((camera_metadata_t *)result.result);
break;
}
}
}
if (!frame_number_valid) {
ALOGV("%s: Not a valid normal frame number, used as SOF only", __func__);
mMetadataChannel->bufDone(metadata_buf);
free(metadata_buf);
goto done_metadata;
}
ALOGV("%s: valid normal frame_number = %d, capture_time = %lld", __func__,
frame_number, capture_time);
// Go through the pending requests info and send shutter/results to frameworks
for (List<PendingRequestInfo>::iterator i = mPendingRequestsList.begin();
i != mPendingRequestsList.end() && i->frame_number <= frame_number;) {
camera3_capture_result_t result;
memset(&result, 0, sizeof(camera3_capture_result_t));
ALOGV("%s: frame_number in the list is %d", __func__, i->frame_number);
i->partial_result_cnt++;
result.partial_result = i->partial_result_cnt;
// Flush out all entries with frame numbers less than or equal to this one.
mPendingRequest--;
// Check whether any stream buffer corresponding to this frame was dropped.
// If so, notify ERROR_BUFFER for the corresponding stream and return the
// buffer with CAMERA3_BUFFER_STATUS_ERROR
if (cam_frame_drop.frame_dropped) {
camera3_notify_msg_t notify_msg;
for (List<RequestedBufferInfo>::iterator j = i->buffers.begin();
j != i->buffers.end(); j++) {
QCamera3Channel *channel = (QCamera3Channel *)j->stream->priv;
uint32_t streamID = channel->getStreamID(channel->getStreamTypeMask());
for (uint32_t k=0; k<cam_frame_drop.cam_stream_ID.num_streams; k++) {
if (streamID == cam_frame_drop.cam_stream_ID.streamID[k]) {
// Send Error notify to frameworks with CAMERA3_MSG_ERROR_BUFFER
ALOGV("%s: Start of reporting error frame#=%d, streamID=%d",
__func__, i->frame_number, streamID);
notify_msg.type = CAMERA3_MSG_ERROR;
notify_msg.message.error.frame_number = i->frame_number;
notify_msg.message.error.error_code = CAMERA3_MSG_ERROR_BUFFER ;
notify_msg.message.error.error_stream = j->stream;
mCallbackOps->notify(mCallbackOps, &notify_msg);
ALOGV("%s: End of reporting error frame#=%d, streamID=%d",
__func__, i->frame_number, streamID);
PendingFrameDropInfo PendingFrameDrop;
PendingFrameDrop.frame_number=i->frame_number;
PendingFrameDrop.stream_ID = streamID;
// Add the Frame drop info to mPendingFrameDropList
mPendingFrameDropList.push_back(PendingFrameDrop);
}
}
}
}
// Send empty metadata with already filled buffers for dropped metadata
// and send valid metadata with already filled buffers for current metadata
if (i->frame_number < frame_number) {
CameraMetadata dummyMetadata;
dummyMetadata.update(ANDROID_SENSOR_TIMESTAMP,
&i->timestamp, 1);
dummyMetadata.update(ANDROID_REQUEST_ID,
&(i->request_id), 1);
result.result = dummyMetadata.release();
} else {
uint8_t bufferStalled = *((uint8_t *)
POINTER_OF(CAM_INTF_META_FRAMES_STALLED, metadata));
if (bufferStalled) {
result.result = NULL; //Metadata should not be sent in this case
camera3_notify_msg_t notify_msg;
memset(&notify_msg, 0, sizeof(camera3_notify_msg_t));
notify_msg.type = CAMERA3_MSG_ERROR;
notify_msg.message.error.frame_number = i->frame_number;
notify_msg.message.error.error_code = CAMERA3_MSG_ERROR_REQUEST;
notify_msg.message.error.error_stream = NULL;
ALOGE("%s: Buffer stall observed reporting error", __func__);
mCallbackOps->notify(mCallbackOps, &notify_msg);
} else {
result.result = translateFromHalMetadata(metadata,
i->timestamp, i->request_id, i->jpegMetadata,
i->pipeline_depth);
}
if (i->blob_request) {
{
//Dump tuning metadata if enabled and available
char prop[PROPERTY_VALUE_MAX];
memset(prop, 0, sizeof(prop));
property_get("persist.camera.dumpmetadata", prop, "0");
int32_t enabled = atoi(prop);
if (enabled && metadata->is_tuning_params_valid) {
dumpMetadataToFile(metadata->tuning_params,
mMetaFrameCount,
enabled,
"Snapshot",
frame_number);
}
}
//If it is a blob request then send the metadata to the picture channel
metadata_buffer_t *reproc_meta =
(metadata_buffer_t *)malloc(sizeof(metadata_buffer_t));
if (reproc_meta == NULL) {
ALOGE("%s: Failed to allocate memory for reproc data.", __func__);
goto done_metadata;
}
*reproc_meta = *metadata;
mPictureChannel->queueReprocMetadata(reproc_meta);
}
// Return metadata buffer
mMetadataChannel->bufDone(metadata_buf);
free(metadata_buf);
}
if (!result.result) {
ALOGE("%s: metadata is NULL", __func__);
}
result.frame_number = i->frame_number;
result.num_output_buffers = 0;
result.output_buffers = NULL;
for (List<RequestedBufferInfo>::iterator j = i->buffers.begin();
j != i->buffers.end(); j++) {
if (j->buffer) {
result.num_output_buffers++;
}
}
if (result.num_output_buffers > 0) {
camera3_stream_buffer_t *result_buffers =
new camera3_stream_buffer_t[result.num_output_buffers];
if (!result_buffers) {
ALOGE("%s: Fatal error: out of memory", __func__);
}
size_t result_buffers_idx = 0;
for (List<RequestedBufferInfo>::iterator j = i->buffers.begin();
j != i->buffers.end(); j++) {
if (j->buffer) {
for (List<PendingFrameDropInfo>::iterator m = mPendingFrameDropList.begin();
m != mPendingFrameDropList.end(); m++) {
QCamera3Channel *channel = (QCamera3Channel *)j->buffer->stream->priv;
uint32_t streamID = channel->getStreamID(channel->getStreamTypeMask());
if((m->stream_ID==streamID) && (m->frame_number==frame_number)) {
j->buffer->status=CAMERA3_BUFFER_STATUS_ERROR;
ALOGV("%s: Stream STATUS_ERROR frame_number=%d, streamID=%d",
__func__, frame_number, streamID);
m = mPendingFrameDropList.erase(m);
break;
}
}
for (List<PendingBufferInfo>::iterator k =
mPendingBuffersMap.mPendingBufferList.begin();
k != mPendingBuffersMap.mPendingBufferList.end(); k++) {
if (k->buffer == j->buffer->buffer) {
ALOGV("%s: Found buffer %p in pending buffer List "
"for frame %d, Take it out!!", __func__,
k->buffer, k->frame_number);
mPendingBuffersMap.num_buffers--;
k = mPendingBuffersMap.mPendingBufferList.erase(k);
break;
}
}
result_buffers[result_buffers_idx++] = *(j->buffer);
free(j->buffer);
j->buffer = NULL;
}
}
result.output_buffers = result_buffers;
mCallbackOps->process_capture_result(mCallbackOps, &result);
ALOGV("%s: meta frame_number = %d, capture_time = %lld",
__func__, result.frame_number, i->timestamp);
free_camera_metadata((camera_metadata_t *)result.result);
delete[] result_buffers;
} else {
mCallbackOps->process_capture_result(mCallbackOps, &result);
ALOGV("%s: meta frame_number = %d, capture_time = %lld",
__func__, result.frame_number, i->timestamp);
free_camera_metadata((camera_metadata_t *)result.result);
}
// erase the element from the list
i = mPendingRequestsList.erase(i);
}
done_metadata:
for (List<PendingRequestInfo>::iterator i = mPendingRequestsList.begin();
i != mPendingRequestsList.end() ;i++) {
i->pipeline_depth++;
}
if (!pending_requests)
unblockRequestIfNecessary();
}
/*===========================================================================
* FUNCTION : handleBufferWithLock
*
* DESCRIPTION: Handles image buffer callback with mMutex lock held.
*
* PARAMETERS : @buffer: image buffer for the callback
* @frame_number: frame number of the image buffer
*
* RETURN : none
*
*==========================================================================*/
void QCamera3HardwareInterface::handleBufferWithLock(
camera3_stream_buffer_t *buffer, uint32_t frame_number)
{
// If the frame number doesn't exist in the pending request list,
// directly send the buffer to the frameworks, and update pending buffers map
// Otherwise, book-keep the buffer.
List<PendingRequestInfo>::iterator i = mPendingRequestsList.begin();
while (i != mPendingRequestsList.end() && i->frame_number != frame_number){
i++;
}
if (i == mPendingRequestsList.end()) {
// Verify that all pending requests' frame_numbers are greater
for (List<PendingRequestInfo>::iterator j = mPendingRequestsList.begin();
j != mPendingRequestsList.end(); j++) {
if (j->frame_number < frame_number) {
ALOGE("%s: Error: pending frame number %d is smaller than %d",
__func__, j->frame_number, frame_number);
}
}
camera3_capture_result_t result;
memset(&result, 0, sizeof(camera3_capture_result_t));
result.result = NULL;
result.frame_number = frame_number;
result.num_output_buffers = 1;
result.partial_result = 0;
for (List<PendingFrameDropInfo>::iterator m = mPendingFrameDropList.begin();
m != mPendingFrameDropList.end(); m++) {
QCamera3Channel *channel = (QCamera3Channel *)buffer->stream->priv;
uint32_t streamID = channel->getStreamID(channel->getStreamTypeMask());
if((m->stream_ID==streamID) && (m->frame_number==frame_number)) {
buffer->status=CAMERA3_BUFFER_STATUS_ERROR;
ALOGV("%s: Stream STATUS_ERROR frame_number=%d, streamID=%d",
__func__, frame_number, streamID);
m = mPendingFrameDropList.erase(m);
break;
}
}
result.output_buffers = buffer;
ALOGV("%s: result frame_number = %d, buffer = %p",
__func__, frame_number, buffer->buffer);
for (List<PendingBufferInfo>::iterator k =
mPendingBuffersMap.mPendingBufferList.begin();
k != mPendingBuffersMap.mPendingBufferList.end(); k++ ) {
if (k->buffer == buffer->buffer) {
ALOGV("%s: Found Frame buffer, take it out from list",
__func__);
mPendingBuffersMap.num_buffers--;
k = mPendingBuffersMap.mPendingBufferList.erase(k);
break;
}
}
ALOGV("%s: mPendingBuffersMap.num_buffers = %d",
__func__, mPendingBuffersMap.num_buffers);
mCallbackOps->process_capture_result(mCallbackOps, &result);
} else {
if (i->input_buffer_present) {
camera3_capture_result result;
memset(&result, 0, sizeof(camera3_capture_result_t));
result.result = NULL;
result.frame_number = frame_number;
result.num_output_buffers = 1;
result.output_buffers = buffer;
result.partial_result = 0;
mCallbackOps->process_capture_result(mCallbackOps, &result);
i = mPendingRequestsList.erase(i);
mPendingRequest--;
} else {
for (List<RequestedBufferInfo>::iterator j = i->buffers.begin();
j != i->buffers.end(); j++) {
if (j->stream == buffer->stream) {
if (j->buffer != NULL) {
ALOGE("%s: Error: buffer is already set", __func__);
} else {
j->buffer = (camera3_stream_buffer_t *)malloc(
sizeof(camera3_stream_buffer_t));
*(j->buffer) = *buffer;
ALOGV("%s: cache buffer %p at result frame_number %d",
__func__, buffer, frame_number);
}
}
}
}
}
}
/*===========================================================================
* FUNCTION : unblockRequestIfNecessary
*
* DESCRIPTION: Unblock process_capture_request if the in-flight count has
* dropped below the limit. Note that mMutex must be held when
* this function is called.
*
* PARAMETERS : none
*
* RETURN : none
*
*==========================================================================*/
void QCamera3HardwareInterface::unblockRequestIfNecessary()
{
// Unblock process_capture_request
pthread_cond_signal(&mRequestCond);
}
/*===========================================================================
* FUNCTION : registerStreamBuffers
*
* DESCRIPTION: Register buffers for a given stream with the HAL device.
* Deprecated as of CAMERA_DEVICE_API_VERSION_3_2; the ops
* table publishes NULL for this entry point.
*
* PARAMETERS :
* @buffer_set : buffers to be registered (unused)
*
* RETURN : NO_ERROR
*
*==========================================================================*/
int QCamera3HardwareInterface::registerStreamBuffers(
const camera3_stream_buffer_set_t * /*buffer_set*/)
{
//Deprecated
return NO_ERROR;
}
/*===========================================================================
* FUNCTION : processCaptureRequest
*
* DESCRIPTION: process a capture request from camera service
*
* PARAMETERS :
* @request : request from framework to process
*
* RETURN : int32_t type of status
* NO_ERROR -- success
* non-zero failure code
*
*==========================================================================*/
int QCamera3HardwareInterface::processCaptureRequest(
camera3_capture_request_t *request)
{
int rc = NO_ERROR;
int32_t request_id;
CameraMetadata meta;
pthread_mutex_lock(&mMutex);
rc = validateCaptureRequest(request);
if (rc != NO_ERROR) {
ALOGE("%s: incoming request is not valid", __func__);
pthread_mutex_unlock(&mMutex);
return -EINVAL;
}
meta = request->settings;
// For the first capture request, send the capture intent and
// start streaming on all streams
if (mFirstRequest) {
for (size_t i = 0; i < request->num_output_buffers; i++) {
const camera3_stream_buffer_t& output = request->output_buffers[i];
QCamera3Channel *channel = (QCamera3Channel *)output.stream->priv;
rc = channel->registerBuffer(output.buffer);
if (rc < 0) {
ALOGE("%s: registerBuffer failed",
__func__);
pthread_mutex_unlock(&mMutex);
return -ENODEV;
}
}
if (meta.exists(ANDROID_CONTROL_CAPTURE_INTENT)) {
int32_t hal_version = CAM_HAL_V3;
uint8_t captureIntent =
meta.find(ANDROID_CONTROL_CAPTURE_INTENT).data.u8[0];
memset(mParameters, 0, sizeof(metadata_buffer_t));
mParameters->first_flagged_entry = CAM_INTF_PARM_MAX;
AddSetMetaEntryToBatch(mParameters, CAM_INTF_PARM_HAL_VERSION,
sizeof(hal_version), &hal_version);
AddSetMetaEntryToBatch(mParameters, CAM_INTF_META_CAPTURE_INTENT,
sizeof(captureIntent), &captureIntent);
mCameraHandle->ops->set_parms(mCameraHandle->camera_handle,
mParameters);
}
//First initialize all streams
for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
it != mStreamInfo.end(); it++) {
QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
rc = channel->initialize();
if (NO_ERROR != rc) {
ALOGE("%s : Channel initialization failed %d", __func__, rc);
pthread_mutex_unlock(&mMutex);
return -ENODEV;
}
}
if (mSupportChannel) {
rc = mSupportChannel->initialize();
if (rc < 0) {
ALOGE("%s: Support channel initialization failed", __func__);
pthread_mutex_unlock(&mMutex);
return -ENODEV;
}
}
//Then start them.
ALOGD("%s: Start META Channel", __func__);
rc = mMetadataChannel->start();
if (rc < 0) {
ALOGE("%s: Metadata channel start failed", __func__);
pthread_mutex_unlock(&mMutex);
return -ENODEV;
}
if (mSupportChannel) {
rc = mSupportChannel->start();
if (rc < 0) {
ALOGE("%s: Support channel start failed", __func__);
mMetadataChannel->stop();
pthread_mutex_unlock(&mMutex);
return -ENODEV;
}
}
for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
it != mStreamInfo.end(); it++) {
QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
ALOGD("%s: Start Regular Channel mask=%d", __func__, channel->getStreamTypeMask());
rc = channel->start();
if (rc < 0) {
ALOGE("%s: Start Regular Channel failed mask=%d", __func__, channel->getStreamTypeMask());
if (mSupportChannel)
mSupportChannel->stop();
mMetadataChannel->stop();
pthread_mutex_unlock(&mMutex);
return -ENODEV;
}
}
}
uint32_t frameNumber = request->frame_number;
cam_stream_ID_t streamID;
if (meta.exists(ANDROID_REQUEST_ID)) {
request_id = meta.find(ANDROID_REQUEST_ID).data.i32[0];
mCurrentRequestId = request_id;
ALOGV("%s: Received request with id: %d",__func__, request_id);
} else if (mFirstRequest || mCurrentRequestId == -1){
ALOGE("%s: Unable to find request id field, \
& no previous id available", __func__);
pthread_mutex_unlock(&mMutex);
return -EINVAL;
} else {
ALOGV("%s: Re-using old request id", __func__);
request_id = mCurrentRequestId;
}
ALOGV("%s: %d, num_output_buffers = %d input_buffer = %p frame_number = %d",
__func__, __LINE__,
request->num_output_buffers,
request->input_buffer,
frameNumber);
// Acquire all request buffers first
streamID.num_streams = 0;
int blob_request = 0;
for (size_t i = 0; i < request->num_output_buffers; i++) {
const camera3_stream_buffer_t& output = request->output_buffers[i];
QCamera3Channel *channel = (QCamera3Channel *)output.stream->priv;
if (output.stream->format == HAL_PIXEL_FORMAT_BLOB) {
//This is a blob (JPEG) request; flag it so the JPEG path below runs.
blob_request = 1;
}
if (output.acquire_fence != -1) {
rc = sync_wait(output.acquire_fence, TIMEOUT_NEVER);
if (rc != OK) {
ALOGE("%s: sync wait failed %d", __func__, rc);
pthread_mutex_unlock(&mMutex);
return rc;
}
}
streamID.streamID[streamID.num_streams] =
channel->getStreamID(channel->getStreamTypeMask());
streamID.num_streams++;
}
if(request->input_buffer == NULL) {
rc = setFrameParameters(request, streamID);
if (rc < 0) {
ALOGE("%s: fail to set frame parameters", __func__);
pthread_mutex_unlock(&mMutex);
return rc;
}
}
/* Update pending request list and pending buffers map */
PendingRequestInfo pendingRequest;
pendingRequest.frame_number = frameNumber;
pendingRequest.num_buffers = request->num_output_buffers;
pendingRequest.request_id = request_id;
pendingRequest.blob_request = blob_request;
pendingRequest.bNotified = 0;
pendingRequest.input_buffer_present = (request->input_buffer != NULL)? 1 : 0;
pendingRequest.pipeline_depth = 0;
pendingRequest.partial_result_cnt = 0;
extractJpegMetadata(pendingRequest.jpegMetadata, request);
for (size_t i = 0; i < request->num_output_buffers; i++) {
RequestedBufferInfo requestedBuf;
requestedBuf.stream = request->output_buffers[i].stream;
requestedBuf.buffer = NULL;
pendingRequest.buffers.push_back(requestedBuf);
// Add the buffer handle to the pending buffers list
PendingBufferInfo bufferInfo;
bufferInfo.frame_number = frameNumber;
bufferInfo.buffer = request->output_buffers[i].buffer;
bufferInfo.stream = request->output_buffers[i].stream;
mPendingBuffersMap.mPendingBufferList.push_back(bufferInfo);
mPendingBuffersMap.num_buffers++;
ALOGV("%s: frame = %d, buffer = %p, stream = %p, stream format = %d",
__func__, frameNumber, bufferInfo.buffer, bufferInfo.stream,
bufferInfo.stream->format);
}
ALOGV("%s: mPendingBuffersMap.num_buffers = %d",
__func__, mPendingBuffersMap.num_buffers);
mPendingRequestsList.push_back(pendingRequest);
if (mFlush) {
pthread_mutex_unlock(&mMutex);
return NO_ERROR;
}
// Notify metadata channel we receive a request
mMetadataChannel->request(NULL, frameNumber);
// Call request on other streams
for (size_t i = 0; i < request->num_output_buffers; i++) {
const camera3_stream_buffer_t& output = request->output_buffers[i];
QCamera3Channel *channel = (QCamera3Channel *)output.stream->priv;
mm_camera_buf_def_t *pInputBuffer = NULL;
if (channel == NULL) {
ALOGE("%s: invalid channel pointer for stream", __func__);
continue;
}
if (output.stream->format == HAL_PIXEL_FORMAT_BLOB) {
QCamera3RegularChannel* inputChannel = NULL;
if (request->input_buffer != NULL) {
// Try to map the input buffer to the backend's internal format
inputChannel = (QCamera3RegularChannel*)
request->input_buffer->stream->priv;
if (inputChannel == NULL) {
ALOGE("%s: failed to get input channel handle", __func__);
} else {
pInputBuffer =
inputChannel->getInternalFormatBuffer(
request->input_buffer->buffer);
if (pInputBuffer == NULL) {
ALOGE("%s: failed to get internal format input buffer", __func__);
} else {
ALOGD("%s: Input buffer dump", __func__);
ALOGD("Stream id: %d", pInputBuffer->stream_id);
ALOGD("streamtype:%d", pInputBuffer->stream_type);
ALOGD("frame len:%d", pInputBuffer->frame_len);
ALOGD("Handle:%p", request->input_buffer->buffer);
}
}
rc = channel->request(output.buffer, frameNumber,
pInputBuffer, mParameters);
if (rc < 0) {
ALOGE("%s: Fail to request on picture channel", __func__);
pthread_mutex_unlock(&mMutex);
return rc;
}
rc = setReprocParameters(request);
if (rc < 0) {
ALOGE("%s: fail to set reproc parameters", __func__);
pthread_mutex_unlock(&mMutex);
return rc;
}
} else {
ALOGV("%s: %d, snapshot request with buffer %p, frame_number %d", __func__,
__LINE__, output.buffer, frameNumber);
if (mRepeatingRequest) {
rc = channel->request(output.buffer, frameNumber,
NULL, mPrevParameters);
} else {
rc = channel->request(output.buffer, frameNumber,
NULL, mParameters);
}
}
} else {
ALOGV("%s: %d, request with buffer %p, frame_number %d", __func__,
__LINE__, output.buffer, frameNumber);
rc = channel->request(output.buffer, frameNumber);
}
if (rc < 0) {
ALOGE("%s: Fail to issue channel request", __func__);
pthread_mutex_unlock(&mMutex);
return -ENODEV;
}
}
/*set the parameters to backend*/
mCameraHandle->ops->set_parms(mCameraHandle->camera_handle, mParameters);
mFirstRequest = false;
// Set up a timed condition wait so a stalled pipeline cannot block forever
struct timespec ts;
uint8_t isValidTimeout = 1;
rc = clock_gettime(CLOCK_REALTIME, &ts);
if (rc < 0) {
isValidTimeout = 0;
ALOGE("%s: Error reading the real time clock!!", __func__);
}
else {
// Allow up to 5 seconds for the request to be honored
ts.tv_sec += 5;
}
// Block on the condition variable while the pipeline is full
mPendingRequest++;
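// Throttle: while the in-flight count is at kMaxInFlight, block
// until completed results bring it back down (or the wait times out)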
while (mPendingRequest >= kMaxInFlight) {
if (!isValidTimeout) {
ALOGV("%s: Blocking on conditional wait", __func__);
pthread_cond_wait(&mRequestCond, &mMutex);
}
else {
ALOGV("%s: Blocking on timed conditional wait", __func__);
rc = pthread_cond_timedwait(&mRequestCond, &mMutex, &ts);
if (rc == ETIMEDOUT) {
rc = -ENODEV;
ALOGE("%s: Unblocked on timeout!!!!", __func__);
break;
}
}
ALOGV("%s: Unblocked", __func__);
}
pthread_mutex_unlock(&mMutex);
return rc;
}
/*===========================================================================
* FUNCTION : dump
*
* DESCRIPTION: dump HAL state for debugging (not yet implemented)
*
* PARAMETERS :
* @fd : file descriptor to write the dump to
*
* RETURN : none
*==========================================================================*/
void QCamera3HardwareInterface::dump(int /*fd*/)
{
/*Enable lock when we implement this function*/
/*
pthread_mutex_lock(&mMutex);
pthread_mutex_unlock(&mMutex);
*/
return;
}
/*===========================================================================
* FUNCTION : flush
*
* DESCRIPTION: stop all channels, return every pending buffer and request
* to the framework with error status, then restart the channels
*
* PARAMETERS : none
*
* RETURN : 0 on success
* NO_MEMORY if a buffer array could not be allocated
*==========================================================================*/
int QCamera3HardwareInterface::flush()
{
unsigned int frameNum = 0;
camera3_notify_msg_t notify_msg;
camera3_capture_result_t result;
camera3_stream_buffer_t *pStream_Buf = NULL;
FlushMap flushMap;
ALOGV("%s: Unblocking Process Capture Request", __func__);
pthread_mutex_lock(&mMutex);
mFlush = true;
pthread_mutex_unlock(&mMutex);
memset(&result, 0, sizeof(camera3_capture_result_t));
// Stop the Streams/Channels
for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
it != mStreamInfo.end(); it++) {
QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
channel->stop();
(*it)->status = INVALID;
}
if (mSupportChannel) {
mSupportChannel->stop();
}
if (mMetadataChannel) {
/* A metadata stream exists whenever mStreamInfo is non-empty */
mMetadataChannel->stop();
}
// Mutex Lock
pthread_mutex_lock(&mMutex);
// Unblock process_capture_request
mPendingRequest = 0;
pthread_cond_signal(&mRequestCond);
List<PendingRequestInfo>::iterator i = mPendingRequestsList.begin();
if (i != mPendingRequestsList.end()) {
frameNum = i->frame_number;
}
ALOGV("%s: Oldest frame num on mPendingRequestsList = %d",
__func__, frameNum);
// Go through the pending buffers and group them depending
// on frame number
for (List<PendingBufferInfo>::iterator k =
mPendingBuffersMap.mPendingBufferList.begin();
k != mPendingBuffersMap.mPendingBufferList.end();) {
if (k->frame_number < frameNum) {
ssize_t idx = flushMap.indexOfKey(k->frame_number);
if (idx == NAME_NOT_FOUND) {
Vector<PendingBufferInfo> pending;
pending.add(*k);
flushMap.add(k->frame_number, pending);
} else {
Vector<PendingBufferInfo> &pending =
flushMap.editValueFor(k->frame_number);
pending.add(*k);
}
mPendingBuffersMap.num_buffers--;
k = mPendingBuffersMap.mPendingBufferList.erase(k);
} else {
k++;
}
}
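// For each grouped frame, send a buffer-error notify and return its
// buffers to the framework with CAMERA3_BUFFER_STATUS_ERROR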
for (size_t i = 0; i < flushMap.size(); i++) {
uint32_t frame_number = flushMap.keyAt(i);
const Vector<PendingBufferInfo> &pending = flushMap.valueAt(i);
// Send an error-buffer notify to the framework for each buffer
// whose metadata has already been sent
ALOGV("%s: Sending ERROR BUFFER for frame %d number of buffer %d",
__func__, frame_number, pending.size());
pStream_Buf = new camera3_stream_buffer_t[pending.size()];
if (NULL == pStream_Buf) {
ALOGE("%s: No memory for pending buffers array", __func__);
pthread_mutex_unlock(&mMutex);
return NO_MEMORY;
}
for (size_t j = 0; j < pending.size(); j++) {
const PendingBufferInfo &info = pending.itemAt(j);
notify_msg.type = CAMERA3_MSG_ERROR;
notify_msg.message.error.error_code = CAMERA3_MSG_ERROR_BUFFER;
notify_msg.message.error.error_stream = info.stream;
notify_msg.message.error.frame_number = frame_number;
pStream_Buf[j].acquire_fence = -1;
pStream_Buf[j].release_fence = -1;
pStream_Buf[j].buffer = info.buffer;
pStream_Buf[j].status = CAMERA3_BUFFER_STATUS_ERROR;
pStream_Buf[j].stream = info.stream;
mCallbackOps->notify(mCallbackOps, &notify_msg);
ALOGV("%s: notify frame_number = %d stream %p", __func__,
frame_number, info.stream);
}
result.result = NULL;
result.frame_number = frame_number;
result.num_output_buffers = pending.size();
result.output_buffers = pStream_Buf;
mCallbackOps->process_capture_result(mCallbackOps, &result);
delete [] pStream_Buf;
}
ALOGV("%s:Sending ERROR REQUEST for all pending requests", __func__);
flushMap.clear();
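// Group every remaining pending buffer by frame number so each
// whole request can be failed in a single capture result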
for (List<PendingBufferInfo>::iterator k =
mPendingBuffersMap.mPendingBufferList.begin();
k != mPendingBuffersMap.mPendingBufferList.end();) {
ssize_t idx = flushMap.indexOfKey(k->frame_number);
if (idx == NAME_NOT_FOUND) {
Vector<PendingBufferInfo> pending;
pending.add(*k);
flushMap.add(k->frame_number, pending);
} else {
Vector<PendingBufferInfo> &pending =
flushMap.editValueFor(k->frame_number);
pending.add(*k);
}
mPendingBuffersMap.num_buffers--;
k = mPendingBuffersMap.mPendingBufferList.erase(k);
}
// Go through the pending requests info and send error request to framework
for (size_t i = 0; i < flushMap.size(); i++) {
uint32_t frame_number = flushMap.keyAt(i);
const Vector<PendingBufferInfo> &pending = flushMap.valueAt(i);
ALOGV("%s:Sending ERROR REQUEST for frame %d",
__func__, frame_number);
// Send an error-request notify to the framework
notify_msg.type = CAMERA3_MSG_ERROR;
notify_msg.message.error.error_code = CAMERA3_MSG_ERROR_REQUEST;
notify_msg.message.error.error_stream = NULL;
notify_msg.message.error.frame_number = frame_number;
mCallbackOps->notify(mCallbackOps, &notify_msg);
pStream_Buf = new camera3_stream_buffer_t[pending.size()];
if (NULL == pStream_Buf) {
ALOGE("%s: No memory for pending buffers array", __func__);
pthread_mutex_unlock(&mMutex);
return NO_MEMORY;
}
for (size_t j = 0; j < pending.size(); j++) {
const PendingBufferInfo &info = pending.itemAt(j);
pStream_Buf[j].acquire_fence = -1;
pStream_Buf[j].release_fence = -1;
pStream_Buf[j].buffer = info.buffer;
pStream_Buf[j].status = CAMERA3_BUFFER_STATUS_ERROR;
pStream_Buf[j].stream = info.stream;
}
result.num_output_buffers = pending.size();
result.output_buffers = pStream_Buf;
result.result = NULL;
result.frame_number = frame_number;
mCallbackOps->process_capture_result(mCallbackOps, &result);
delete [] pStream_Buf;
}
/* Reset pending buffer list and requests list */
mPendingRequestsList.clear();
/* Reset pending frame Drop list and requests list */
mPendingFrameDropList.clear();
flushMap.clear();
mPendingBuffersMap.num_buffers = 0;
mPendingBuffersMap.mPendingBufferList.clear();
ALOGV("%s: Cleared all the pending buffers ", __func__);
mFlush = false;
mFirstRequest = true;
// Start the Streams/Channels
if (mMetadataChannel) {
/* A metadata stream exists whenever mStreamInfo is non-empty */
mMetadataChannel->start();
}
for (List<stream_info_t *>::iterator it = mStreamInfo.begin();
it != mStreamInfo.end(); it++) {
QCamera3Channel *channel = (QCamera3Channel *)(*it)->stream->priv;
channel->start();
}
if (mSupportChannel) {
mSupportChannel->start();
}
pthread_mutex_unlock(&mMutex);
return 0;
}
/*===========================================================================
* FUNCTION : captureResultCb
*
* DESCRIPTION: Callback handler for all capture result
* (streams, as well as metadata)
*
* PARAMETERS :
* @metadata : metadata information
* @buffer : actual gralloc buffer to be returned to frameworks.
* NULL if metadata.
*
* RETURN : NONE
*==========================================================================*/
void QCamera3HardwareInterface::captureResultCb(mm_camera_super_buf_t *metadata_buf,
camera3_stream_buffer_t *buffer, uint32_t frame_number)
{
pthread_mutex_lock(&mMutex);
/* Assumes flush() is called before any reprocessing. If a reprocess
* result was buffered (mLoopBackResult), send its notify and capture
* result immediately upon receipt of any callback */
if (mLoopBackResult) {
/* Send notify */
camera3_notify_msg_t notify_msg;
notify_msg.type = CAMERA3_MSG_SHUTTER;
notify_msg.message.shutter.frame_number = mLoopBackResult->frame_number;
notify_msg.message.shutter.timestamp = mLoopBackTimestamp;
mCallbackOps->notify(mCallbackOps, &notify_msg);
/* Send capture result */
mCallbackOps->process_capture_result(mCallbackOps, mLoopBackResult);
free_camera_metadata((camera_metadata_t *)mLoopBackResult->result);
free(mLoopBackResult);
mLoopBackResult = NULL;
}
if (metadata_buf)
handleMetadataWithLock(metadata_buf);
else
handleBufferWithLock(buffer, frame_number);
pthread_mutex_unlock(&mMutex);
return;
}
/*===========================================================================
* FUNCTION : translateFromHalMetadata
*
* DESCRIPTION: translate metadata from the HAL backend format into the
* camera_metadata_t format expected by the framework
*
* PARAMETERS :
* @metadata : metadata information from callback
*
* RETURN : camera_metadata_t*
* metadata in a format specified by fwk
*==========================================================================*/
camera_metadata_t*
QCamera3HardwareInterface::translateFromHalMetadata(
metadata_buffer_t *metadata,
nsecs_t timestamp,
int32_t request_id,
const CameraMetadata& jpegMetadata,
uint8_t pipeline_depth)
{
CameraMetadata camMetadata;
camera_metadata_t* resultMetadata;
if (jpegMetadata.entryCount())
camMetadata.append(jpegMetadata);
camMetadata.update(ANDROID_SENSOR_TIMESTAMP, &timestamp, 1);
camMetadata.update(ANDROID_REQUEST_ID, &request_id, 1);
camMetadata.update(ANDROID_REQUEST_PIPELINE_DEPTH, &pipeline_depth, 1);
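// Walk the packed HAL metadata buffer entry by entry, translating
// each recognized tag into its framework counterpart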
uint8_t curr_entry = GET_FIRST_PARAM_ID(metadata);
uint8_t next_entry;
while (curr_entry != CAM_INTF_PARM_MAX) {
switch (curr_entry) {
case CAM_INTF_META_FRAME_NUMBER:{
int64_t frame_number = *(uint32_t *) POINTER_OF(CAM_INTF_META_FRAME_NUMBER, metadata);
camMetadata.update(ANDROID_SYNC_FRAME_NUMBER, &frame_number, 1);
break;
}
case CAM_INTF_META_FACE_DETECTION:{
cam_face_detection_data_t *faceDetectionInfo =
(cam_face_detection_data_t *)POINTER_OF(CAM_INTF_META_FACE_DETECTION, metadata);
uint8_t numFaces = faceDetectionInfo->num_faces_detected;
int32_t faceIds[MAX_ROI];
uint8_t faceScores[MAX_ROI];
int32_t faceRectangles[MAX_ROI * 4];
int j = 0;
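// Flatten each detected face into the framework's parallel arrays
// (one id and score per face, four rectangle coordinates per face)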
for (int i = 0; i < numFaces; i++) {
faceIds[i] = faceDetectionInfo->faces[i].face_id;
faceScores[i] = faceDetectionInfo->faces[i].score;
convertToRegions(faceDetectionInfo->faces[i].face_boundary,
faceRectangles+j, -1);
j+= 4;
}
if (numFaces <= 0) {
memset(faceIds, 0, sizeof(int32_t) * MAX_ROI);
memset(faceScores, 0, sizeof(uint8_t) * MAX_ROI);
memset(faceRectangles, 0, sizeof(int32_t) * MAX_ROI * 4);
}
camMetadata.update(ANDROID_STATISTICS_FACE_IDS, faceIds, numFaces);
camMetadata.update(ANDROID_STATISTICS_FACE_SCORES, faceScores, numFaces);
camMetadata.update(ANDROID_STATISTICS_FACE_RECTANGLES,
faceRectangles, numFaces*4);
break;
}
case CAM_INTF_META_COLOR_CORRECT_MODE:{
uint8_t *color_correct_mode =
(uint8_t *)POINTER_OF(CAM_INTF_META_COLOR_CORRECT_MODE, metadata);
camMetadata.update(ANDROID_COLOR_CORRECTION_MODE, color_correct_mode, 1);
break;
}
// 3A state is sent in urgent partial result (uses quirk)
case CAM_INTF_META_AEC_STATE:
case CAM_INTF_PARM_AEC_LOCK:
case CAM_INTF_PARM_EV:
case CAM_INTF_PARM_FOCUS_MODE:
case CAM_INTF_META_AF_STATE:
case CAM_INTF_PARM_WHITE_BALANCE:
case CAM_INTF_META_AWB_REGIONS:
case CAM_INTF_META_AWB_STATE:
case CAM_INTF_PARM_AWB_LOCK:
case CAM_INTF_META_PRECAPTURE_TRIGGER:
case CAM_INTF_META_AEC_MODE:
case CAM_INTF_PARM_LED_MODE:
case CAM_INTF_PARM_REDEYE_REDUCTION:
case CAM_INTF_META_AF_TRIGGER_NOTICE: {
ALOGV("%s: 3A metadata: %d, do not process", __func__, curr_entry);
break;
}
case CAM_INTF_META_MODE: {
uint8_t *mode =(uint8_t *)POINTER_OF(CAM_INTF_META_MODE, metadata);
camMetadata.update(ANDROID_CONTROL_MODE, mode, 1);
break;
}
case CAM_INTF_META_EDGE_MODE: {
cam_edge_application_t *edgeApplication =
(cam_edge_application_t *)POINTER_OF(CAM_INTF_META_EDGE_MODE, metadata);
uint8_t edgeStrength = (uint8_t)edgeApplication->sharpness;
camMetadata.update(ANDROID_EDGE_MODE, &(edgeApplication->edge_mode), 1);
camMetadata.update(ANDROID_EDGE_STRENGTH, &edgeStrength, 1);
break;
}
case CAM_INTF_META_FLASH_POWER: {
uint8_t *flashPower =
(uint8_t *)POINTER_OF(CAM_INTF_META_FLASH_POWER, metadata);
camMetadata.update(ANDROID_FLASH_FIRING_POWER, flashPower, 1);
break;
}
case CAM_INTF_META_FLASH_FIRING_TIME: {
int64_t *flashFiringTime =
(int64_t *)POINTER_OF(CAM_INTF_META_FLASH_FIRING_TIME, metadata);
camMetadata.update(ANDROID_FLASH_FIRING_TIME, flashFiringTime, 1);
break;
}
case CAM_INTF_META_FLASH_STATE: {
uint8_t flashState =
*((uint8_t *)POINTER_OF(CAM_INTF_META_FLASH_STATE, metadata));
if (!gCamCapability[mCameraId]->flash_available) {
flashState = ANDROID_FLASH_STATE_UNAVAILABLE;
}
camMetadata.update(ANDROID_FLASH_STATE, &flashState, 1);
break;
}
case CAM_INTF_META_FLASH_MODE:{
uint8_t flashMode = *((uint8_t*)
POINTER_OF(CAM_INTF_META_FLASH_MODE, metadata));
uint8_t fwk_flashMode = (uint8_t)lookupFwkName(FLASH_MODES_MAP,
sizeof(FLASH_MODES_MAP)/sizeof(FLASH_MODES_MAP[0]),
flashMode);
camMetadata.update(ANDROID_FLASH_MODE, &fwk_flashMode, 1);
break;
}
case CAM_INTF_META_HOTPIXEL_MODE: {
uint8_t *hotPixelMode =
(uint8_t *)POINTER_OF(CAM_INTF_META_HOTPIXEL_MODE, metadata);
camMetadata.update(ANDROID_HOT_PIXEL_MODE, hotPixelMode, 1);
break;
}
case CAM_INTF_META_LENS_APERTURE:{
float *lensAperture =
(float *)POINTER_OF(CAM_INTF_META_LENS_APERTURE, metadata);
camMetadata.update(ANDROID_LENS_APERTURE , lensAperture, 1);
break;
}
case CAM_INTF_META_LENS_FILTERDENSITY: {
float *filterDensity =
(float *)POINTER_OF(CAM_INTF_META_LENS_FILTERDENSITY, metadata);
camMetadata.update(ANDROID_LENS_FILTER_DENSITY , filterDensity, 1);
break;
}
case CAM_INTF_META_LENS_FOCAL_LENGTH:{
float *focalLength =
(float *)POINTER_OF(CAM_INTF_META_LENS_FOCAL_LENGTH, metadata);
camMetadata.update(ANDROID_LENS_FOCAL_LENGTH, focalLength, 1);
break;
}
case CAM_INTF_META_LENS_FOCUS_DISTANCE: {
float *focusDistance =
(float *)POINTER_OF(CAM_INTF_META_LENS_FOCUS_DISTANCE, metadata);
camMetadata.update(ANDROID_LENS_FOCUS_DISTANCE , focusDistance, 1);
break;
}
case CAM_INTF_META_LENS_FOCUS_RANGE: {
float *focusRange =
(float *)POINTER_OF(CAM_INTF_META_LENS_FOCUS_RANGE, metadata);
camMetadata.update(ANDROID_LENS_FOCUS_RANGE , focusRange, 2);
break;
}
case CAM_INTF_META_LENS_STATE: {
uint8_t *lensState = (uint8_t *)POINTER_OF(CAM_INTF_META_LENS_STATE, metadata);
camMetadata.update(ANDROID_LENS_STATE , lensState, 1);
break;
}
case CAM_INTF_META_LENS_OPT_STAB_MODE: {
uint8_t *opticalStab =
(uint8_t *)POINTER_OF(CAM_INTF_META_LENS_OPT_STAB_MODE, metadata);
camMetadata.update(ANDROID_LENS_OPTICAL_STABILIZATION_MODE ,opticalStab, 1);
break;
}
case CAM_INTF_META_NOISE_REDUCTION_MODE: {
uint8_t *noiseRedMode =
(uint8_t *)POINTER_OF(CAM_INTF_META_NOISE_REDUCTION_MODE, metadata);
camMetadata.update(ANDROID_NOISE_REDUCTION_MODE , noiseRedMode, 1);
break;
}
case CAM_INTF_META_NOISE_REDUCTION_STRENGTH: {
uint8_t *noiseRedStrength =
(uint8_t *)POINTER_OF(CAM_INTF_META_NOISE_REDUCTION_STRENGTH, metadata);
camMetadata.update(ANDROID_NOISE_REDUCTION_STRENGTH, noiseRedStrength, 1);
break;
}
case CAM_INTF_META_SCALER_CROP_REGION: {
cam_crop_region_t *hScalerCropRegion =(cam_crop_region_t *)
POINTER_OF(CAM_INTF_META_SCALER_CROP_REGION, metadata);
int32_t scalerCropRegion[4];
scalerCropRegion[0] = hScalerCropRegion->left;
scalerCropRegion[1] = hScalerCropRegion->top;
scalerCropRegion[2] = hScalerCropRegion->width;
scalerCropRegion[3] = hScalerCropRegion->height;
camMetadata.update(ANDROID_SCALER_CROP_REGION, scalerCropRegion, 4);
break;
}
case CAM_INTF_META_AEC_ROI: {
cam_area_t *hAeRegions =
(cam_area_t *)POINTER_OF(CAM_INTF_META_AEC_ROI, metadata);
int32_t aeRegions[5];
convertToRegions(hAeRegions->rect, aeRegions, hAeRegions->weight);
camMetadata.update(ANDROID_CONTROL_AE_REGIONS, aeRegions, 5);
ALOGV("%s: Metadata : ANDROID_CONTROL_AE_REGIONS: FWK: [%d, %d, %d, %d] HAL: [%d, %d, %d, %d]",
__func__, aeRegions[0], aeRegions[1], aeRegions[2], aeRegions[3],
hAeRegions->rect.left, hAeRegions->rect.top, hAeRegions->rect.width, hAeRegions->rect.height);
break;
}
case CAM_INTF_META_AF_ROI:{
/*af regions*/
cam_area_t *hAfRegions =
(cam_area_t *)POINTER_OF(CAM_INTF_META_AF_ROI, metadata);
int32_t afRegions[5];
convertToRegions(hAfRegions->rect, afRegions, hAfRegions->weight);
camMetadata.update(ANDROID_CONTROL_AF_REGIONS, afRegions, 5);
ALOGV("%s: Metadata : ANDROID_CONTROL_AF_REGIONS: FWK: [%d, %d, %d, %d] HAL: [%d, %d, %d, %d]",
__func__, afRegions[0], afRegions[1], afRegions[2], afRegions[3],
hAfRegions->rect.left, hAfRegions->rect.top, hAfRegions->rect.width, hAfRegions->rect.height);
break;
}
case CAM_INTF_META_SENSOR_EXPOSURE_TIME:{
int64_t *sensorExpTime =
(int64_t *)POINTER_OF(CAM_INTF_META_SENSOR_EXPOSURE_TIME, metadata);
ALOGV("%s: sensorExpTime = %lld", __func__, *sensorExpTime);
camMetadata.update(ANDROID_SENSOR_EXPOSURE_TIME , sensorExpTime, 1);
break;
}
case CAM_INTF_META_SENSOR_ROLLING_SHUTTER_SKEW:{
int64_t *sensorRollingShutterSkew =
(int64_t *)POINTER_OF(CAM_INTF_META_SENSOR_ROLLING_SHUTTER_SKEW,
metadata);
ALOGV("%s: sensorRollingShutterSkew = %lld", __func__,
*sensorRollingShutterSkew);
camMetadata.update(ANDROID_SENSOR_ROLLING_SHUTTER_SKEW ,
sensorRollingShutterSkew, 1);
break;
}
case CAM_INTF_META_SENSOR_FRAME_DURATION:{
int64_t *sensorFameDuration =
(int64_t *)POINTER_OF(CAM_INTF_META_SENSOR_FRAME_DURATION, metadata);
ALOGV("%s: sensorFameDuration = %lld", __func__, *sensorFameDuration);
camMetadata.update(ANDROID_SENSOR_FRAME_DURATION, sensorFameDuration, 1);
break;
}
case CAM_INTF_META_SENSOR_SENSITIVITY:{
int32_t sensorSensitivity =
*((int32_t *)POINTER_OF(CAM_INTF_META_SENSOR_SENSITIVITY, metadata));
ALOGV("%s: sensorSensitivity = %d", __func__, sensorSensitivity);
camMetadata.update(ANDROID_SENSOR_SENSITIVITY, &sensorSensitivity, 1);
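// ANDROID_SENSOR_NOISE_PROFILE expects interleaved (S, O) noise model
// coefficients per color channel: [S, O, S, O, ...]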
double noise_profile_S = computeNoiseModelEntryS(sensorSensitivity);
double noise_profile_O = computeNoiseModelEntryO(sensorSensitivity);
double noise_profile[2 * gCamCapability[mCameraId]->num_color_channels];
for(int i = 0; i < 2 * gCamCapability[mCameraId]->num_color_channels; i+=2){
noise_profile[i] = noise_profile_S;
noise_profile[i+1] = noise_profile_O;
}
camMetadata.update(ANDROID_SENSOR_NOISE_PROFILE, noise_profile,
2 * gCamCapability[mCameraId]->num_color_channels);
break;
}
case CAM_INTF_PARM_BESTSHOT_MODE: {
uint8_t *sceneMode =
(uint8_t *)POINTER_OF(CAM_INTF_PARM_BESTSHOT_MODE, metadata);
uint8_t fwkSceneMode =
(uint8_t)lookupFwkName(SCENE_MODES_MAP,
sizeof(SCENE_MODES_MAP)/
sizeof(SCENE_MODES_MAP[0]), *sceneMode);
camMetadata.update(ANDROID_CONTROL_SCENE_MODE,
&fwkSceneMode, 1);
ALOGV("%s: Metadata : ANDROID_CONTROL_SCENE_MODE: %d", __func__, fwkSceneMode);
break;
}
case CAM_INTF_META_SHADING_MODE: {
uint8_t *shadingMode =
(uint8_t *)POINTER_OF(CAM_INTF_META_SHADING_MODE, metadata);
camMetadata.update(ANDROID_SHADING_MODE, shadingMode, 1);
break;
}
case CAM_INTF_META_LENS_SHADING_MAP_MODE: {
uint8_t *shadingMapMode =
(uint8_t *)POINTER_OF(CAM_INTF_META_LENS_SHADING_MAP_MODE, metadata);
camMetadata.update(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE, shadingMapMode, 1);
break;
}
case CAM_INTF_META_STATS_FACEDETECT_MODE: {
uint8_t *faceDetectMode =
(uint8_t *)POINTER_OF(CAM_INTF_META_STATS_FACEDETECT_MODE, metadata);
uint8_t fwk_faceDetectMode = (uint8_t)lookupFwkName(FACEDETECT_MODES_MAP,
sizeof(FACEDETECT_MODES_MAP)/sizeof(FACEDETECT_MODES_MAP[0]),
*faceDetectMode);
/* Downgrade to simple mode */
if (fwk_faceDetectMode == ANDROID_STATISTICS_FACE_DETECT_MODE_FULL) {
fwk_faceDetectMode = ANDROID_STATISTICS_FACE_DETECT_MODE_SIMPLE;
}
camMetadata.update(ANDROID_STATISTICS_FACE_DETECT_MODE, &fwk_faceDetectMode, 1);
break;
}
case CAM_INTF_META_STATS_HISTOGRAM_MODE: {
uint8_t *histogramMode =
(uint8_t *)POINTER_OF(CAM_INTF_META_STATS_HISTOGRAM_MODE, metadata);
camMetadata.update(ANDROID_STATISTICS_HISTOGRAM_MODE, histogramMode, 1);
break;
}
case CAM_INTF_META_STATS_SHARPNESS_MAP_MODE:{
uint8_t *sharpnessMapMode =
(uint8_t *)POINTER_OF(CAM_INTF_META_STATS_SHARPNESS_MAP_MODE, metadata);
camMetadata.update(ANDROID_STATISTICS_SHARPNESS_MAP_MODE,
sharpnessMapMode, 1);
break;
}
case CAM_INTF_META_STATS_SHARPNESS_MAP:{
cam_sharpness_map_t *sharpnessMap = (cam_sharpness_map_t *)
POINTER_OF(CAM_INTF_META_STATS_SHARPNESS_MAP, metadata);
camMetadata.update(ANDROID_STATISTICS_SHARPNESS_MAP,
(int32_t*)sharpnessMap->sharpness,
CAM_MAX_MAP_WIDTH*CAM_MAX_MAP_HEIGHT);
break;
}
case CAM_INTF_META_LENS_SHADING_MAP: {
cam_lens_shading_map_t *lensShadingMap = (cam_lens_shading_map_t *)
POINTER_OF(CAM_INTF_META_LENS_SHADING_MAP, metadata);
int map_height = gCamCapability[mCameraId]->lens_shading_map_size.height;
int map_width = gCamCapability[mCameraId]->lens_shading_map_size.width;
camMetadata.update(ANDROID_STATISTICS_LENS_SHADING_MAP,
(float*)lensShadingMap->lens_shading,
4*map_width*map_height);
break;
}
case CAM_INTF_META_TONEMAP_MODE: {
uint8_t *toneMapMode =
(uint8_t *)POINTER_OF(CAM_INTF_META_TONEMAP_MODE, metadata);
camMetadata.update(ANDROID_TONEMAP_MODE, toneMapMode, 1);
break;
}
case CAM_INTF_META_TONEMAP_CURVES:{
// Populate CAM_INTF_META_TONEMAP_CURVES
/* Channel order: ch0 = G, ch1 = B, ch2 = R */
cam_rgb_tonemap_curves *tonemap = (cam_rgb_tonemap_curves *)
POINTER_OF(CAM_INTF_META_TONEMAP_CURVES, metadata);
camMetadata.update(ANDROID_TONEMAP_CURVE_GREEN,
(float*)tonemap->curves[0].tonemap_points,
tonemap->tonemap_points_cnt * 2);
camMetadata.update(ANDROID_TONEMAP_CURVE_BLUE,
(float*)tonemap->curves[1].tonemap_points,
tonemap->tonemap_points_cnt * 2);
camMetadata.update(ANDROID_TONEMAP_CURVE_RED,
(float*)tonemap->curves[2].tonemap_points,
tonemap->tonemap_points_cnt * 2);
break;
}
case CAM_INTF_META_COLOR_CORRECT_GAINS:{
cam_color_correct_gains_t *colorCorrectionGains = (cam_color_correct_gains_t*)
POINTER_OF(CAM_INTF_META_COLOR_CORRECT_GAINS, metadata);
camMetadata.update(ANDROID_COLOR_CORRECTION_GAINS, colorCorrectionGains->gains, 4);
break;
}
case CAM_INTF_META_COLOR_CORRECT_TRANSFORM:{
cam_color_correct_matrix_t *colorCorrectionMatrix = (cam_color_correct_matrix_t*)
POINTER_OF(CAM_INTF_META_COLOR_CORRECT_TRANSFORM, metadata);
camMetadata.update(ANDROID_COLOR_CORRECTION_TRANSFORM,
(camera_metadata_rational_t*)colorCorrectionMatrix->transform_matrix, 3*3);
break;
}
/* DNG file related metadata */
case CAM_INTF_META_PROFILE_TONE_CURVE: {
cam_profile_tone_curve *toneCurve = (cam_profile_tone_curve *)
POINTER_OF(CAM_INTF_META_PROFILE_TONE_CURVE, metadata);
camMetadata.update(ANDROID_SENSOR_PROFILE_TONE_CURVE,
(float*)toneCurve->curve.tonemap_points,
toneCurve->tonemap_points_cnt * 2);
break;
}
case CAM_INTF_META_PRED_COLOR_CORRECT_GAINS:{
cam_color_correct_gains_t *predColorCorrectionGains = (cam_color_correct_gains_t*)
POINTER_OF(CAM_INTF_META_PRED_COLOR_CORRECT_GAINS, metadata);
camMetadata.update(ANDROID_STATISTICS_PREDICTED_COLOR_GAINS,
predColorCorrectionGains->gains, 4);
break;
}
case CAM_INTF_META_PRED_COLOR_CORRECT_TRANSFORM:{
cam_color_correct_matrix_t *predColorCorrectionMatrix = (cam_color_correct_matrix_t*)
POINTER_OF(CAM_INTF_META_PRED_COLOR_CORRECT_TRANSFORM, metadata);
camMetadata.update(ANDROID_STATISTICS_PREDICTED_COLOR_TRANSFORM,
(camera_metadata_rational_t*)predColorCorrectionMatrix->transform_matrix, 3*3);
break;
}
case CAM_INTF_META_OTP_WB_GRGB:{
float *otpWbGrGb = (float*) POINTER_OF(CAM_INTF_META_OTP_WB_GRGB, metadata);
camMetadata.update(ANDROID_SENSOR_GREEN_SPLIT, otpWbGrGb, 1);
break;
}
case CAM_INTF_META_BLACK_LEVEL_LOCK:{
uint8_t *blackLevelLock = (uint8_t*)
POINTER_OF(CAM_INTF_META_BLACK_LEVEL_LOCK, metadata);
camMetadata.update(ANDROID_BLACK_LEVEL_LOCK, blackLevelLock, 1);
break;
}
case CAM_INTF_PARM_ANTIBANDING: {
uint8_t *hal_ab_mode =
(uint8_t *)POINTER_OF(CAM_INTF_PARM_ANTIBANDING, metadata);
uint8_t fwk_ab_mode = (uint8_t)lookupFwkName(ANTIBANDING_MODES_MAP,
sizeof(ANTIBANDING_MODES_MAP)/sizeof(ANTIBANDING_MODES_MAP[0]),
*hal_ab_mode);
camMetadata.update(ANDROID_CONTROL_AE_ANTIBANDING_MODE,
&fwk_ab_mode, 1);
break;
}
case CAM_INTF_META_CAPTURE_INTENT:{
uint8_t *captureIntent = (uint8_t*)
POINTER_OF(CAM_INTF_META_CAPTURE_INTENT, metadata);
camMetadata.update(ANDROID_CONTROL_CAPTURE_INTENT, captureIntent, 1);
break;
}
case CAM_INTF_META_SCENE_FLICKER:{
uint8_t *sceneFlicker = (uint8_t*)
POINTER_OF(CAM_INTF_META_SCENE_FLICKER, metadata);
camMetadata.update(ANDROID_STATISTICS_SCENE_FLICKER, sceneFlicker, 1);
break;
}
case CAM_INTF_PARM_EFFECT: {
uint8_t *effectMode = (uint8_t*)
POINTER_OF(CAM_INTF_PARM_EFFECT, metadata);
uint8_t fwk_effectMode = (uint8_t)lookupFwkName(EFFECT_MODES_MAP,
sizeof(EFFECT_MODES_MAP)/sizeof(EFFECT_MODES_MAP[0]),
*effectMode);
camMetadata.update(ANDROID_CONTROL_EFFECT_MODE, &fwk_effectMode, 1);
break;
}
case CAM_INTF_META_TEST_PATTERN_DATA: {
cam_test_pattern_data_t *testPatternData = (cam_test_pattern_data_t *)
POINTER_OF(CAM_INTF_META_TEST_PATTERN_DATA, metadata);